diff options
Diffstat (limited to 'drivers/gpu/drm/exynos')
53 files changed, 26371 insertions, 0 deletions
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig new file mode 100644 index 00000000000..178d2a9672a --- /dev/null +++ b/drivers/gpu/drm/exynos/Kconfig @@ -0,0 +1,98 @@ +config DRM_EXYNOS +	tristate "DRM Support for Samsung SoC EXYNOS Series" +	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) +	select DRM_KMS_HELPER +	select DRM_KMS_FB_HELPER +	select FB_CFB_FILLRECT +	select FB_CFB_COPYAREA +	select FB_CFB_IMAGEBLIT +	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE +	select VIDEOMODE_HELPERS +	help +	  Choose this option if you have a Samsung SoC EXYNOS chipset. +	  If M is selected the module will be called exynosdrm. + +config DRM_EXYNOS_IOMMU +	bool "EXYNOS DRM IOMMU Support" +	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU +	help +	  Choose this option if you want to use IOMMU feature for DRM. + +config DRM_EXYNOS_DMABUF +	bool "EXYNOS DRM DMABUF" +	depends on DRM_EXYNOS +	help +	  Choose this option if you want to use DMABUF feature for DRM. + +config DRM_EXYNOS_FIMD +	bool "Exynos DRM FIMD" +	depends on DRM_EXYNOS && !FB_S3C +	select FB_MODE_HELPERS +	help +	  Choose this option if you want to use Exynos FIMD for DRM. + +config DRM_EXYNOS_DPI +	bool "EXYNOS DRM parallel output support" +	depends on DRM_EXYNOS_FIMD +	select DRM_PANEL +	default n +	help +	  This enables support for Exynos parallel output. + +config DRM_EXYNOS_DSI +	bool "EXYNOS DRM MIPI-DSI driver support" +	depends on DRM_EXYNOS_FIMD +	select DRM_MIPI_DSI +	select DRM_PANEL +	default n +	help +	  This enables support for Exynos MIPI-DSI device. + +config DRM_EXYNOS_DP +	bool "EXYNOS DRM DP driver support" +	depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) +	default DRM_EXYNOS +	help +	  This enables support for DP device. 
+ +config DRM_EXYNOS_HDMI +	bool "Exynos DRM HDMI" +	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV +	help +	  Choose this option if you want to use Exynos HDMI for DRM. + +config DRM_EXYNOS_VIDI +	bool "Exynos DRM Virtual Display" +	depends on DRM_EXYNOS +	help +	  Choose this option if you want to use Exynos VIDI for DRM. + +config DRM_EXYNOS_G2D +	bool "Exynos DRM G2D" +	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D +	help +	  Choose this option if you want to use Exynos G2D for DRM. + +config DRM_EXYNOS_IPP +	bool "Exynos DRM IPP" +	depends on DRM_EXYNOS +	help +	  Choose this option if you want to use IPP feature for DRM. + +config DRM_EXYNOS_FIMC +	bool "Exynos DRM FIMC" +	depends on DRM_EXYNOS_IPP && MFD_SYSCON +	help +	  Choose this option if you want to use Exynos FIMC for DRM. + +config DRM_EXYNOS_ROTATOR +	bool "Exynos DRM Rotator" +	depends on DRM_EXYNOS_IPP +	help +	  Choose this option if you want to use Exynos Rotator for DRM. + +config DRM_EXYNOS_GSC +	bool "Exynos DRM GSC" +	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM +	help +	  Choose this option if you want to use Exynos GSC for DRM. diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile new file mode 100644 index 00000000000..33ae3652b8d --- /dev/null +++ b/drivers/gpu/drm/exynos/Makefile @@ -0,0 +1,25 @@ +# +# Makefile for the drm device driver.  This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
+ +ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos +exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \ +		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ +		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ +		exynos_drm_plane.o + +exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o +exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o +exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o +exynosdrm-$(CONFIG_DRM_EXYNOS_DPI)	+= exynos_drm_dpi.o +exynosdrm-$(CONFIG_DRM_EXYNOS_DSI)	+= exynos_drm_dsi.o +exynosdrm-$(CONFIG_DRM_EXYNOS_DP)	+= exynos_dp_core.o exynos_dp_reg.o +exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o +exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o +exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o +exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o +exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)	+= exynos_drm_fimc.o +exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR)	+= exynos_drm_rotator.o +exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)	+= exynos_drm_gsc.o + +obj-$(CONFIG_DRM_EXYNOS)		+= exynosdrm.o diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c new file mode 100644 index 00000000000..a8ffc8c1477 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -0,0 +1,1393 @@ +/* + * Samsung SoC DP (Display Port) interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/component.h> +#include <linux/phy/phy.h> +#include <video/of_display_timing.h> +#include <video/of_videomode.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/bridge/ptn3460.h> + +#include "exynos_drm_drv.h" +#include "exynos_dp_core.h" + +#define ctx_from_connector(c)	container_of(c, struct exynos_dp_device, \ +					connector) + +struct bridge_init { +	struct i2c_client *client; +	struct device_node *node; +}; + +static int exynos_dp_init_dp(struct exynos_dp_device *dp) +{ +	exynos_dp_reset(dp); + +	exynos_dp_swreset(dp); + +	exynos_dp_init_analog_param(dp); +	exynos_dp_init_interrupt(dp); + +	/* SW defined function Normal operation */ +	exynos_dp_enable_sw_function(dp); + +	exynos_dp_config_interrupt(dp); +	exynos_dp_init_analog_func(dp); + +	exynos_dp_init_hpd(dp); +	exynos_dp_init_aux(dp); + +	return 0; +} + +static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) +{ +	int timeout_loop = 0; + +	while (exynos_dp_get_plug_in_status(dp) != 0) { +		timeout_loop++; +		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { +			dev_err(dp->dev, "failed to get hpd plug status\n"); +			return -ETIMEDOUT; +		} +		usleep_range(10, 11); +	} + +	return 0; +} + +static unsigned char exynos_dp_calc_edid_check_sum(unsigned char *edid_data) +{ +	int i; +	unsigned char sum = 0; + +	for (i = 0; i < EDID_BLOCK_LENGTH; i++) +		sum = sum + edid_data[i]; + +	return sum; +} + +static int exynos_dp_read_edid(struct exynos_dp_device *dp) +{ +	unsigned char edid[EDID_BLOCK_LENGTH * 2]; +	unsigned int extend_block = 0; +	unsigned char sum; +	unsigned char test_vector; +	int retval; + +	/* +	 * EDID device address is 0x50. 
+	 * However, if necessary, you must have set upper address +	 * into E-EDID in I2C device, 0x30. +	 */ + +	/* Read Extension Flag, Number of 128-byte EDID extension blocks */ +	retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR, +				EDID_EXTENSION_FLAG, +				&extend_block); +	if (retval) +		return retval; + +	if (extend_block > 0) { +		dev_dbg(dp->dev, "EDID data includes a single extension!\n"); + +		/* Read EDID data */ +		retval = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR, +						EDID_HEADER_PATTERN, +						EDID_BLOCK_LENGTH, +						&edid[EDID_HEADER_PATTERN]); +		if (retval != 0) { +			dev_err(dp->dev, "EDID Read failed!\n"); +			return -EIO; +		} +		sum = exynos_dp_calc_edid_check_sum(edid); +		if (sum != 0) { +			dev_err(dp->dev, "EDID bad checksum!\n"); +			return -EIO; +		} + +		/* Read additional EDID data */ +		retval = exynos_dp_read_bytes_from_i2c(dp, +				I2C_EDID_DEVICE_ADDR, +				EDID_BLOCK_LENGTH, +				EDID_BLOCK_LENGTH, +				&edid[EDID_BLOCK_LENGTH]); +		if (retval != 0) { +			dev_err(dp->dev, "EDID Read failed!\n"); +			return -EIO; +		} +		sum = exynos_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]); +		if (sum != 0) { +			dev_err(dp->dev, "EDID bad checksum!\n"); +			return -EIO; +		} + +		exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST, +					&test_vector); +		if (test_vector & DP_TEST_LINK_EDID_READ) { +			exynos_dp_write_byte_to_dpcd(dp, +				DP_TEST_EDID_CHECKSUM, +				edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]); +			exynos_dp_write_byte_to_dpcd(dp, +				DP_TEST_RESPONSE, +				DP_TEST_EDID_CHECKSUM_WRITE); +		} +	} else { +		dev_info(dp->dev, "EDID data does not include any extensions.\n"); + +		/* Read EDID data */ +		retval = exynos_dp_read_bytes_from_i2c(dp, +				I2C_EDID_DEVICE_ADDR, +				EDID_HEADER_PATTERN, +				EDID_BLOCK_LENGTH, +				&edid[EDID_HEADER_PATTERN]); +		if (retval != 0) { +			dev_err(dp->dev, "EDID Read failed!\n"); +			return -EIO; +		} +		sum = exynos_dp_calc_edid_check_sum(edid); +		if (sum 
!= 0) { +			dev_err(dp->dev, "EDID bad checksum!\n"); +			return -EIO; +		} + +		exynos_dp_read_byte_from_dpcd(dp, +			DP_TEST_REQUEST, +			&test_vector); +		if (test_vector & DP_TEST_LINK_EDID_READ) { +			exynos_dp_write_byte_to_dpcd(dp, +				DP_TEST_EDID_CHECKSUM, +				edid[EDID_CHECKSUM]); +			exynos_dp_write_byte_to_dpcd(dp, +				DP_TEST_RESPONSE, +				DP_TEST_EDID_CHECKSUM_WRITE); +		} +	} + +	dev_err(dp->dev, "EDID Read success!\n"); +	return 0; +} + +static int exynos_dp_handle_edid(struct exynos_dp_device *dp) +{ +	u8 buf[12]; +	int i; +	int retval; + +	/* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */ +	retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, +				12, buf); +	if (retval) +		return retval; + +	/* Read EDID */ +	for (i = 0; i < 3; i++) { +		retval = exynos_dp_read_edid(dp); +		if (!retval) +			break; +	} + +	return retval; +} + +static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp, +						bool enable) +{ +	u8 data; + +	exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data); + +	if (enable) +		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, +			DP_LANE_COUNT_ENHANCED_FRAME_EN | +			DPCD_LANE_COUNT_SET(data)); +	else +		exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, +			DPCD_LANE_COUNT_SET(data)); +} + +static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp) +{ +	u8 data; +	int retval; + +	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); +	retval = DPCD_ENHANCED_FRAME_CAP(data); + +	return retval; +} + +static void exynos_dp_set_enhanced_mode(struct exynos_dp_device *dp) +{ +	u8 data; + +	data = exynos_dp_is_enhanced_mode_available(dp); +	exynos_dp_enable_rx_to_enhanced_mode(dp, data); +	exynos_dp_enable_enhanced_mode(dp, data); +} + +static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp) +{ +	exynos_dp_set_training_pattern(dp, DP_NONE); + +	exynos_dp_write_byte_to_dpcd(dp, +		DP_TRAINING_PATTERN_SET, +		DP_TRAINING_PATTERN_DISABLE); +} + +static void 
exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp, +					int pre_emphasis, int lane) +{ +	switch (lane) { +	case 0: +		exynos_dp_set_lane0_pre_emphasis(dp, pre_emphasis); +		break; +	case 1: +		exynos_dp_set_lane1_pre_emphasis(dp, pre_emphasis); +		break; + +	case 2: +		exynos_dp_set_lane2_pre_emphasis(dp, pre_emphasis); +		break; + +	case 3: +		exynos_dp_set_lane3_pre_emphasis(dp, pre_emphasis); +		break; +	} +} + +static int exynos_dp_link_start(struct exynos_dp_device *dp) +{ +	u8 buf[4]; +	int lane, lane_count, pll_tries, retval; + +	lane_count = dp->link_train.lane_count; + +	dp->link_train.lt_state = CLOCK_RECOVERY; +	dp->link_train.eq_loop = 0; + +	for (lane = 0; lane < lane_count; lane++) +		dp->link_train.cr_loop[lane] = 0; + +	/* Set link rate and count as you want to establish*/ +	exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate); +	exynos_dp_set_lane_count(dp, dp->link_train.lane_count); + +	/* Setup RX configuration */ +	buf[0] = dp->link_train.link_rate; +	buf[1] = dp->link_train.lane_count; +	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, +				2, buf); +	if (retval) +		return retval; + +	/* Set TX pre-emphasis to minimum */ +	for (lane = 0; lane < lane_count; lane++) +		exynos_dp_set_lane_lane_pre_emphasis(dp, +			PRE_EMPHASIS_LEVEL_0, lane); + +	/* Wait for PLL lock */ +	pll_tries = 0; +	while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { +		if (pll_tries == DP_TIMEOUT_LOOP_COUNT) { +			dev_err(dp->dev, "Wait for PLL lock timed out\n"); +			return -ETIMEDOUT; +		} + +		pll_tries++; +		usleep_range(90, 120); +	} + +	/* Set training pattern 1 */ +	exynos_dp_set_training_pattern(dp, TRAINING_PTN1); + +	/* Set RX training pattern */ +	retval = exynos_dp_write_byte_to_dpcd(dp, +			DP_TRAINING_PATTERN_SET, +			DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1); +	if (retval) +		return retval; + +	for (lane = 0; lane < lane_count; lane++) +		buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 | +			    
DP_TRAIN_VOLTAGE_SWING_400; + +	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, +			lane_count, buf); + +	return retval; +} + +static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane) +{ +	int shift = (lane & 1) * 4; +	u8 link_value = link_status[lane>>1]; + +	return (link_value >> shift) & 0xf; +} + +static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count) +{ +	int lane; +	u8 lane_status; + +	for (lane = 0; lane < lane_count; lane++) { +		lane_status = exynos_dp_get_lane_status(link_status, lane); +		if ((lane_status & DP_LANE_CR_DONE) == 0) +			return -EINVAL; +	} +	return 0; +} + +static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align, +				int lane_count) +{ +	int lane; +	u8 lane_status; + +	if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0) +		return -EINVAL; + +	for (lane = 0; lane < lane_count; lane++) { +		lane_status = exynos_dp_get_lane_status(link_status, lane); +		lane_status &= DP_CHANNEL_EQ_BITS; +		if (lane_status != DP_CHANNEL_EQ_BITS) +			return -EINVAL; +	} + +	return 0; +} + +static unsigned char exynos_dp_get_adjust_request_voltage(u8 adjust_request[2], +							int lane) +{ +	int shift = (lane & 1) * 4; +	u8 link_value = adjust_request[lane>>1]; + +	return (link_value >> shift) & 0x3; +} + +static unsigned char exynos_dp_get_adjust_request_pre_emphasis( +					u8 adjust_request[2], +					int lane) +{ +	int shift = (lane & 1) * 4; +	u8 link_value = adjust_request[lane>>1]; + +	return ((link_value >> shift) & 0xc) >> 2; +} + +static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp, +					u8 training_lane_set, int lane) +{ +	switch (lane) { +	case 0: +		exynos_dp_set_lane0_link_training(dp, training_lane_set); +		break; +	case 1: +		exynos_dp_set_lane1_link_training(dp, training_lane_set); +		break; + +	case 2: +		exynos_dp_set_lane2_link_training(dp, training_lane_set); +		break; + +	case 3: +		exynos_dp_set_lane3_link_training(dp, training_lane_set); +		break; +	} +} 
+ +static unsigned int exynos_dp_get_lane_link_training( +				struct exynos_dp_device *dp, +				int lane) +{ +	u32 reg; + +	switch (lane) { +	case 0: +		reg = exynos_dp_get_lane0_link_training(dp); +		break; +	case 1: +		reg = exynos_dp_get_lane1_link_training(dp); +		break; +	case 2: +		reg = exynos_dp_get_lane2_link_training(dp); +		break; +	case 3: +		reg = exynos_dp_get_lane3_link_training(dp); +		break; +	default: +		WARN_ON(1); +		return 0; +	} + +	return reg; +} + +static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp) +{ +	exynos_dp_training_pattern_dis(dp); +	exynos_dp_set_enhanced_mode(dp); + +	dp->link_train.lt_state = FAILED; +} + +static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp, +					u8 adjust_request[2]) +{ +	int lane, lane_count; +	u8 voltage_swing, pre_emphasis, training_lane; + +	lane_count = dp->link_train.lane_count; +	for (lane = 0; lane < lane_count; lane++) { +		voltage_swing = exynos_dp_get_adjust_request_voltage( +						adjust_request, lane); +		pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis( +						adjust_request, lane); +		training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) | +				DPCD_PRE_EMPHASIS_SET(pre_emphasis); + +		if (voltage_swing == VOLTAGE_LEVEL_3) +			training_lane |= DP_TRAIN_MAX_SWING_REACHED; +		if (pre_emphasis == PRE_EMPHASIS_LEVEL_3) +			training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + +		dp->link_train.training_lane[lane] = training_lane; +	} +} + +static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) +{ +	int lane, lane_count, retval; +	u8 voltage_swing, pre_emphasis, training_lane; +	u8 link_status[2], adjust_request[2]; + +	usleep_range(100, 101); + +	lane_count = dp->link_train.lane_count; + +	retval =  exynos_dp_read_bytes_from_dpcd(dp, +			DP_LANE0_1_STATUS, 2, link_status); +	if (retval) +		return retval; + +	retval =  exynos_dp_read_bytes_from_dpcd(dp, +			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); +	if (retval) +		return retval; + +	
if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { +		/* set training pattern 2 for EQ */ +		exynos_dp_set_training_pattern(dp, TRAINING_PTN2); + +		retval = exynos_dp_write_byte_to_dpcd(dp, +				DP_TRAINING_PATTERN_SET, +				DP_LINK_SCRAMBLING_DISABLE | +				DP_TRAINING_PATTERN_2); +		if (retval) +			return retval; + +		dev_info(dp->dev, "Link Training Clock Recovery success\n"); +		dp->link_train.lt_state = EQUALIZER_TRAINING; +	} else { +		for (lane = 0; lane < lane_count; lane++) { +			training_lane = exynos_dp_get_lane_link_training( +							dp, lane); +			voltage_swing = exynos_dp_get_adjust_request_voltage( +							adjust_request, lane); +			pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis( +							adjust_request, lane); + +			if (DPCD_VOLTAGE_SWING_GET(training_lane) == +					voltage_swing && +			    DPCD_PRE_EMPHASIS_GET(training_lane) == +					pre_emphasis) +				dp->link_train.cr_loop[lane]++; + +			if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP || +			    voltage_swing == VOLTAGE_LEVEL_3 || +			    pre_emphasis == PRE_EMPHASIS_LEVEL_3) { +				dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n", +					dp->link_train.cr_loop[lane], +					voltage_swing, pre_emphasis); +				exynos_dp_reduce_link_rate(dp); +				return -EIO; +			} +		} +	} + +	exynos_dp_get_adjust_training_lane(dp, adjust_request); + +	for (lane = 0; lane < lane_count; lane++) +		exynos_dp_set_lane_link_training(dp, +			dp->link_train.training_lane[lane], lane); + +	retval = exynos_dp_write_bytes_to_dpcd(dp, +			DP_TRAINING_LANE0_SET, lane_count, +			dp->link_train.training_lane); +	if (retval) +		return retval; + +	return retval; +} + +static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) +{ +	int lane, lane_count, retval; +	u32 reg; +	u8 link_align, link_status[2], adjust_request[2]; + +	usleep_range(400, 401); + +	lane_count = dp->link_train.lane_count; + +	retval = exynos_dp_read_bytes_from_dpcd(dp, +			DP_LANE0_1_STATUS, 2, link_status); +	if 
(retval) +		return retval; + +	if (exynos_dp_clock_recovery_ok(link_status, lane_count)) { +		exynos_dp_reduce_link_rate(dp); +		return -EIO; +	} + +	retval = exynos_dp_read_bytes_from_dpcd(dp, +			DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); +	if (retval) +		return retval; + +	retval = exynos_dp_read_byte_from_dpcd(dp, +			DP_LANE_ALIGN_STATUS_UPDATED, &link_align); +	if (retval) +		return retval; + +	exynos_dp_get_adjust_training_lane(dp, adjust_request); + +	if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) { +		/* traing pattern Set to Normal */ +		exynos_dp_training_pattern_dis(dp); + +		dev_info(dp->dev, "Link Training success!\n"); + +		exynos_dp_get_link_bandwidth(dp, ®); +		dp->link_train.link_rate = reg; +		dev_dbg(dp->dev, "final bandwidth = %.2x\n", +			dp->link_train.link_rate); + +		exynos_dp_get_lane_count(dp, ®); +		dp->link_train.lane_count = reg; +		dev_dbg(dp->dev, "final lane count = %.2x\n", +			dp->link_train.lane_count); + +		/* set enhanced mode if available */ +		exynos_dp_set_enhanced_mode(dp); +		dp->link_train.lt_state = FINISHED; + +		return 0; +	} + +	/* not all locked */ +	dp->link_train.eq_loop++; + +	if (dp->link_train.eq_loop > MAX_EQ_LOOP) { +		dev_err(dp->dev, "EQ Max loop\n"); +		exynos_dp_reduce_link_rate(dp); +		return -EIO; +	} + +	for (lane = 0; lane < lane_count; lane++) +		exynos_dp_set_lane_link_training(dp, +			dp->link_train.training_lane[lane], lane); + +	retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, +			lane_count, dp->link_train.training_lane); + +	return retval; +} + +static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp, +					u8 *bandwidth) +{ +	u8 data; + +	/* +	 * For DP rev.1.1, Maximum link rate of Main Link lanes +	 * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps +	 */ +	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data); +	*bandwidth = data; +} + +static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp, +					u8 *lane_count) +{ +	u8 data; + 
+	/* +	 * For DP rev.1.1, Maximum number of Main Link lanes +	 * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes +	 */ +	exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); +	*lane_count = DPCD_MAX_LANE_COUNT(data); +} + +static void exynos_dp_init_training(struct exynos_dp_device *dp, +			enum link_lane_count_type max_lane, +			enum link_rate_type max_rate) +{ +	/* +	 * MACRO_RST must be applied after the PLL_LOCK to avoid +	 * the DP inter pair skew issue for at least 10 us +	 */ +	exynos_dp_reset_macro(dp); + +	/* Initialize by reading RX's DPCD */ +	exynos_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); +	exynos_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); + +	if ((dp->link_train.link_rate != LINK_RATE_1_62GBPS) && +	   (dp->link_train.link_rate != LINK_RATE_2_70GBPS)) { +		dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n", +			dp->link_train.link_rate); +		dp->link_train.link_rate = LINK_RATE_1_62GBPS; +	} + +	if (dp->link_train.lane_count == 0) { +		dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n", +			dp->link_train.lane_count); +		dp->link_train.lane_count = (u8)LANE_COUNT1; +	} + +	/* Setup TX lane count & rate */ +	if (dp->link_train.lane_count > max_lane) +		dp->link_train.lane_count = max_lane; +	if (dp->link_train.link_rate > max_rate) +		dp->link_train.link_rate = max_rate; + +	/* All DP analog module power up */ +	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); +} + +static int exynos_dp_sw_link_training(struct exynos_dp_device *dp) +{ +	int retval = 0, training_finished = 0; + +	dp->link_train.lt_state = START; + +	/* Process here */ +	while (!retval && !training_finished) { +		switch (dp->link_train.lt_state) { +		case START: +			retval = exynos_dp_link_start(dp); +			if (retval) +				dev_err(dp->dev, "LT link start failed!\n"); +			break; +		case CLOCK_RECOVERY: +			retval = exynos_dp_process_clock_recovery(dp); +			if (retval) +				dev_err(dp->dev, "LT CR failed!\n"); +			break; +		case 
EQUALIZER_TRAINING: +			retval = exynos_dp_process_equalizer_training(dp); +			if (retval) +				dev_err(dp->dev, "LT EQ failed!\n"); +			break; +		case FINISHED: +			training_finished = 1; +			break; +		case FAILED: +			return -EREMOTEIO; +		} +	} +	if (retval) +		dev_err(dp->dev, "eDP link training failed (%d)\n", retval); + +	return retval; +} + +static int exynos_dp_set_link_train(struct exynos_dp_device *dp, +				u32 count, +				u32 bwtype) +{ +	int i; +	int retval; + +	for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) { +		exynos_dp_init_training(dp, count, bwtype); +		retval = exynos_dp_sw_link_training(dp); +		if (retval == 0) +			break; + +		usleep_range(100, 110); +	} + +	return retval; +} + +static int exynos_dp_config_video(struct exynos_dp_device *dp) +{ +	int retval = 0; +	int timeout_loop = 0; +	int done_count = 0; + +	exynos_dp_config_video_slave_mode(dp); + +	exynos_dp_set_video_color_format(dp); + +	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { +		dev_err(dp->dev, "PLL is not locked yet.\n"); +		return -EINVAL; +	} + +	for (;;) { +		timeout_loop++; +		if (exynos_dp_is_slave_video_stream_clock_on(dp) == 0) +			break; +		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { +			dev_err(dp->dev, "Timeout of video streamclk ok\n"); +			return -ETIMEDOUT; +		} + +		usleep_range(1, 2); +	} + +	/* Set to use the register calculated M/N video */ +	exynos_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0); + +	/* For video bist, Video timing must be generated by register */ +	exynos_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE); + +	/* Disable video mute */ +	exynos_dp_enable_video_mute(dp, 0); + +	/* Configure video slave mode */ +	exynos_dp_enable_video_master(dp, 0); + +	/* Enable video */ +	exynos_dp_start_video(dp); + +	timeout_loop = 0; + +	for (;;) { +		timeout_loop++; +		if (exynos_dp_is_video_stream_on(dp) == 0) { +			done_count++; +			if (done_count > 10) +				break; +		} else if (done_count) { +			done_count = 0; +		} +		if (DP_TIMEOUT_LOOP_COUNT < 
timeout_loop) { +			dev_err(dp->dev, "Timeout of video streamclk ok\n"); +			return -ETIMEDOUT; +		} + +		usleep_range(1000, 1001); +	} + +	if (retval != 0) +		dev_err(dp->dev, "Video stream is not detected!\n"); + +	return retval; +} + +static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable) +{ +	u8 data; + +	if (enable) { +		exynos_dp_enable_scrambling(dp); + +		exynos_dp_read_byte_from_dpcd(dp, +			DP_TRAINING_PATTERN_SET, +			&data); +		exynos_dp_write_byte_to_dpcd(dp, +			DP_TRAINING_PATTERN_SET, +			(u8)(data & ~DP_LINK_SCRAMBLING_DISABLE)); +	} else { +		exynos_dp_disable_scrambling(dp); + +		exynos_dp_read_byte_from_dpcd(dp, +			DP_TRAINING_PATTERN_SET, +			&data); +		exynos_dp_write_byte_to_dpcd(dp, +			DP_TRAINING_PATTERN_SET, +			(u8)(data | DP_LINK_SCRAMBLING_DISABLE)); +	} +} + +static irqreturn_t exynos_dp_irq_handler(int irq, void *arg) +{ +	struct exynos_dp_device *dp = arg; + +	enum dp_irq_type irq_type; + +	irq_type = exynos_dp_get_irq_type(dp); +	switch (irq_type) { +	case DP_IRQ_TYPE_HP_CABLE_IN: +		dev_dbg(dp->dev, "Received irq - cable in\n"); +		schedule_work(&dp->hotplug_work); +		exynos_dp_clear_hotplug_interrupts(dp); +		break; +	case DP_IRQ_TYPE_HP_CABLE_OUT: +		dev_dbg(dp->dev, "Received irq - cable out\n"); +		exynos_dp_clear_hotplug_interrupts(dp); +		break; +	case DP_IRQ_TYPE_HP_CHANGE: +		/* +		 * We get these change notifications once in a while, but there +		 * is nothing we can do with them. Just ignore it for now and +		 * only handle cable changes. 
+		 */ +		dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n"); +		exynos_dp_clear_hotplug_interrupts(dp); +		break; +	default: +		dev_err(dp->dev, "Received irq - unknown type!\n"); +		break; +	} +	return IRQ_HANDLED; +} + +static void exynos_dp_hotplug(struct work_struct *work) +{ +	struct exynos_dp_device *dp; +	int ret; + +	dp = container_of(work, struct exynos_dp_device, hotplug_work); + +	ret = exynos_dp_detect_hpd(dp); +	if (ret) { +		/* Cable has been disconnected, we're done */ +		return; +	} + +	ret = exynos_dp_handle_edid(dp); +	if (ret) { +		dev_err(dp->dev, "unable to handle edid\n"); +		return; +	} + +	ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count, +					dp->video_info->link_rate); +	if (ret) { +		dev_err(dp->dev, "unable to do link train\n"); +		return; +	} + +	exynos_dp_enable_scramble(dp, 1); +	exynos_dp_enable_rx_to_enhanced_mode(dp, 1); +	exynos_dp_enable_enhanced_mode(dp, 1); + +	exynos_dp_set_lane_count(dp, dp->video_info->lane_count); +	exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate); + +	exynos_dp_init_video(dp); +	ret = exynos_dp_config_video(dp); +	if (ret) +		dev_err(dp->dev, "unable to config video\n"); +} + +static enum drm_connector_status exynos_dp_detect( +				struct drm_connector *connector, bool force) +{ +	return connector_status_connected; +} + +static void exynos_dp_connector_destroy(struct drm_connector *connector) +{ +} + +static struct drm_connector_funcs exynos_dp_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.detect = exynos_dp_detect, +	.destroy = exynos_dp_connector_destroy, +}; + +static int exynos_dp_get_modes(struct drm_connector *connector) +{ +	struct exynos_dp_device *dp = ctx_from_connector(connector); +	struct drm_display_mode *mode; + +	mode = drm_mode_create(connector->dev); +	if (!mode) { +		DRM_ERROR("failed to create a new display mode.\n"); +		return 0; +	} + +	
drm_display_mode_from_videomode(&dp->panel.vm, mode); +	mode->width_mm = dp->panel.width_mm; +	mode->height_mm = dp->panel.height_mm; +	connector->display_info.width_mm = mode->width_mm; +	connector->display_info.height_mm = mode->height_mm; + +	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; +	drm_mode_set_name(mode); +	drm_mode_probed_add(connector, mode); + +	return 1; +} + +static struct drm_encoder *exynos_dp_best_encoder( +			struct drm_connector *connector) +{ +	struct exynos_dp_device *dp = ctx_from_connector(connector); + +	return dp->encoder; +} + +static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { +	.get_modes = exynos_dp_get_modes, +	.best_encoder = exynos_dp_best_encoder, +}; + +static bool find_bridge(const char *compat, struct bridge_init *bridge) +{ +	bridge->client = NULL; +	bridge->node = of_find_compatible_node(NULL, NULL, compat); +	if (!bridge->node) +		return false; + +	bridge->client = of_find_i2c_device_by_node(bridge->node); +	if (!bridge->client) +		return false; + +	return true; +} + +/* returns the number of bridges attached */ +static int exynos_drm_attach_lcd_bridge(struct drm_device *dev, +		struct drm_encoder *encoder) +{ +	struct bridge_init bridge; +	int ret; + +	if (find_bridge("nxp,ptn3460", &bridge)) { +		ret = ptn3460_init(dev, encoder, bridge.client, bridge.node); +		if (!ret) +			return 1; +	} +	return 0; +} + +static int exynos_dp_create_connector(struct exynos_drm_display *display, +				struct drm_encoder *encoder) +{ +	struct exynos_dp_device *dp = display->ctx; +	struct drm_connector *connector = &dp->connector; +	int ret; + +	dp->encoder = encoder; + +	/* Pre-empt DP connector creation if there's a bridge */ +	ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder); +	if (ret) +		return 0; + +	connector->polled = DRM_CONNECTOR_POLL_HPD; + +	ret = drm_connector_init(dp->drm_dev, connector, +			&exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP); +	if (ret) { +		DRM_ERROR("Failed to 
initialize connector with drm\n"); +		return ret; +	} + +	drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs); +	drm_sysfs_connector_add(connector); +	drm_mode_connector_attach_encoder(connector, encoder); + +	return 0; +} + +static void exynos_dp_phy_init(struct exynos_dp_device *dp) +{ +	if (dp->phy) { +		phy_power_on(dp->phy); +	} else if (dp->phy_addr) { +		u32 reg; + +		reg = __raw_readl(dp->phy_addr); +		reg |= dp->enable_mask; +		__raw_writel(reg, dp->phy_addr); +	} +} + +static void exynos_dp_phy_exit(struct exynos_dp_device *dp) +{ +	if (dp->phy) { +		phy_power_off(dp->phy); +	} else if (dp->phy_addr) { +		u32 reg; + +		reg = __raw_readl(dp->phy_addr); +		reg &= ~(dp->enable_mask); +		__raw_writel(reg, dp->phy_addr); +	} +} + +static void exynos_dp_poweron(struct exynos_dp_device *dp) +{ +	if (dp->dpms_mode == DRM_MODE_DPMS_ON) +		return; + +	clk_prepare_enable(dp->clock); +	exynos_dp_phy_init(dp); +	exynos_dp_init_dp(dp); +	enable_irq(dp->irq); +} + +static void exynos_dp_poweroff(struct exynos_dp_device *dp) +{ +	if (dp->dpms_mode != DRM_MODE_DPMS_ON) +		return; + +	disable_irq(dp->irq); +	flush_work(&dp->hotplug_work); +	exynos_dp_phy_exit(dp); +	clk_disable_unprepare(dp->clock); +} + +static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) +{ +	struct exynos_dp_device *dp = display->ctx; + +	switch (mode) { +	case DRM_MODE_DPMS_ON: +		exynos_dp_poweron(dp); +		break; +	case DRM_MODE_DPMS_STANDBY: +	case DRM_MODE_DPMS_SUSPEND: +	case DRM_MODE_DPMS_OFF: +		exynos_dp_poweroff(dp); +		break; +	default: +		break; +	} +	dp->dpms_mode = mode; +} + +static struct exynos_drm_display_ops exynos_dp_display_ops = { +	.create_connector = exynos_dp_create_connector, +	.dpms = exynos_dp_dpms, +}; + +static struct exynos_drm_display exynos_dp_display = { +	.type = EXYNOS_DISPLAY_TYPE_LCD, +	.ops = &exynos_dp_display_ops, +}; + +static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) +{ +	struct device_node *dp_node = 
dev->of_node; +	struct video_info *dp_video_config; + +	dp_video_config = devm_kzalloc(dev, +				sizeof(*dp_video_config), GFP_KERNEL); +	if (!dp_video_config) +		return ERR_PTR(-ENOMEM); + +	dp_video_config->h_sync_polarity = +		of_property_read_bool(dp_node, "hsync-active-high"); + +	dp_video_config->v_sync_polarity = +		of_property_read_bool(dp_node, "vsync-active-high"); + +	dp_video_config->interlaced = +		of_property_read_bool(dp_node, "interlaced"); + +	if (of_property_read_u32(dp_node, "samsung,color-space", +				&dp_video_config->color_space)) { +		dev_err(dev, "failed to get color-space\n"); +		return ERR_PTR(-EINVAL); +	} + +	if (of_property_read_u32(dp_node, "samsung,dynamic-range", +				&dp_video_config->dynamic_range)) { +		dev_err(dev, "failed to get dynamic-range\n"); +		return ERR_PTR(-EINVAL); +	} + +	if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff", +				&dp_video_config->ycbcr_coeff)) { +		dev_err(dev, "failed to get ycbcr-coeff\n"); +		return ERR_PTR(-EINVAL); +	} + +	if (of_property_read_u32(dp_node, "samsung,color-depth", +				&dp_video_config->color_depth)) { +		dev_err(dev, "failed to get color-depth\n"); +		return ERR_PTR(-EINVAL); +	} + +	if (of_property_read_u32(dp_node, "samsung,link-rate", +				&dp_video_config->link_rate)) { +		dev_err(dev, "failed to get link-rate\n"); +		return ERR_PTR(-EINVAL); +	} + +	if (of_property_read_u32(dp_node, "samsung,lane-count", +				&dp_video_config->lane_count)) { +		dev_err(dev, "failed to get lane-count\n"); +		return ERR_PTR(-EINVAL); +	} + +	return dp_video_config; +} + +static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp) +{ +	struct device_node *dp_phy_node = of_node_get(dp->dev->of_node); +	u32 phy_base; +	int ret = 0; + +	dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy"); +	if (!dp_phy_node) { +		dp->phy = devm_phy_get(dp->dev, "dp"); +		return PTR_ERR_OR_ZERO(dp->phy); +	} + +	if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) { +		dev_err(dp->dev, "failed 
to get reg for dptx-phy\n"); +		ret = -EINVAL; +		goto err; +	} + +	if (of_property_read_u32(dp_phy_node, "samsung,enable-mask", +				&dp->enable_mask)) { +		dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n"); +		ret = -EINVAL; +		goto err; +	} + +	dp->phy_addr = ioremap(phy_base, SZ_4); +	if (!dp->phy_addr) { +		dev_err(dp->dev, "failed to ioremap dp-phy\n"); +		ret = -ENOMEM; +		goto err; +	} + +err: +	of_node_put(dp_phy_node); + +	return ret; +} + +static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) +{ +	int ret; + +	ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm, +			OF_USE_NATIVE_MODE); +	if (ret) { +		DRM_ERROR("failed: of_get_videomode() : %d\n", ret); +		return ret; +	} +	return 0; +} + +static int exynos_dp_bind(struct device *dev, struct device *master, void *data) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct drm_device *drm_dev = data; +	struct resource *res; +	struct exynos_dp_device *dp; +	unsigned int irq_flags; + +	int ret = 0; + +	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), +				GFP_KERNEL); +	if (!dp) +		return -ENOMEM; + +	dp->dev = &pdev->dev; +	dp->dpms_mode = DRM_MODE_DPMS_OFF; + +	dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev); +	if (IS_ERR(dp->video_info)) +		return PTR_ERR(dp->video_info); + +	ret = exynos_dp_dt_parse_phydata(dp); +	if (ret) +		return ret; + +	ret = exynos_dp_dt_parse_panel(dp); +	if (ret) +		return ret; + +	dp->clock = devm_clk_get(&pdev->dev, "dp"); +	if (IS_ERR(dp->clock)) { +		dev_err(&pdev->dev, "failed to get clock\n"); +		return PTR_ERR(dp->clock); +	} + +	clk_prepare_enable(dp->clock); + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	dp->reg_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(dp->reg_base)) +		return PTR_ERR(dp->reg_base); + +	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0); + +	if (gpio_is_valid(dp->hpd_gpio)) { +		/* +		 * Set up the hotplug GPIO from the device tree as an 
interrupt. +		 * Simply specifying a different interrupt in the device tree +		 * doesn't work since we handle hotplug rather differently when +		 * using a GPIO.  We also need the actual GPIO specifier so +		 * that we can get the current state of the GPIO. +		 */ +		ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN, +					    "hpd_gpio"); +		if (ret) { +			dev_err(&pdev->dev, "failed to get hpd gpio\n"); +			return ret; +		} +		dp->irq = gpio_to_irq(dp->hpd_gpio); +		irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; +	} else { +		dp->hpd_gpio = -ENODEV; +		dp->irq = platform_get_irq(pdev, 0); +		irq_flags = 0; +	} + +	if (dp->irq == -ENXIO) { +		dev_err(&pdev->dev, "failed to get irq\n"); +		return -ENODEV; +	} + +	INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); + +	exynos_dp_phy_init(dp); + +	exynos_dp_init_dp(dp); + +	ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, +			irq_flags, "exynos-dp", dp); +	if (ret) { +		dev_err(&pdev->dev, "failed to request irq\n"); +		return ret; +	} +	disable_irq(dp->irq); + +	dp->drm_dev = drm_dev; +	exynos_dp_display.ctx = dp; + +	platform_set_drvdata(pdev, &exynos_dp_display); + +	return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display); +} + +static void exynos_dp_unbind(struct device *dev, struct device *master, +				void *data) +{ +	struct exynos_drm_display *display = dev_get_drvdata(dev); +	struct exynos_dp_device *dp = display->ctx; +	struct drm_encoder *encoder = dp->encoder; + +	exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); + +	encoder->funcs->destroy(encoder); +	drm_connector_cleanup(&dp->connector); +} + +static const struct component_ops exynos_dp_ops = { +	.bind	= exynos_dp_bind, +	.unbind	= exynos_dp_unbind, +}; + +static int exynos_dp_probe(struct platform_device *pdev) +{ +	int ret; + +	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, +					exynos_dp_display.type); +	if (ret) +		return ret; + +	ret = component_add(&pdev->dev, &exynos_dp_ops); +	if 
(ret) +		exynos_drm_component_del(&pdev->dev, +						EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return ret; +} + +static int exynos_dp_remove(struct platform_device *pdev) +{ +	component_del(&pdev->dev, &exynos_dp_ops); +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int exynos_dp_suspend(struct device *dev) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct exynos_drm_display *display = platform_get_drvdata(pdev); + +	exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); +	return 0; +} + +static int exynos_dp_resume(struct device *dev) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct exynos_drm_display *display = platform_get_drvdata(pdev); + +	exynos_dp_dpms(display, DRM_MODE_DPMS_ON); +	return 0; +} +#endif + +static const struct dev_pm_ops exynos_dp_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) +}; + +static const struct of_device_id exynos_dp_match[] = { +	{ .compatible = "samsung,exynos5-dp" }, +	{}, +}; + +struct platform_driver dp_driver = { +	.probe		= exynos_dp_probe, +	.remove		= exynos_dp_remove, +	.driver		= { +		.name	= "exynos-dp", +		.owner	= THIS_MODULE, +		.pm	= &exynos_dp_pm_ops, +		.of_match_table = exynos_dp_match, +	}, +}; + +MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("Samsung SoC DP Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h new file mode 100644 index 00000000000..02cc4f9ab90 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -0,0 +1,279 @@ +/* + * Header file for Samsung DP (Display Port) interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. 
+ * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DP_CORE_H +#define _EXYNOS_DP_CORE_H + +#include <drm/drm_crtc.h> +#include <drm/drm_dp_helper.h> +#include <drm/exynos_drm.h> + +#define DP_TIMEOUT_LOOP_COUNT 100 +#define MAX_CR_LOOP 5 +#define MAX_EQ_LOOP 5 + +enum link_rate_type { +	LINK_RATE_1_62GBPS = 0x06, +	LINK_RATE_2_70GBPS = 0x0a +}; + +enum link_lane_count_type { +	LANE_COUNT1 = 1, +	LANE_COUNT2 = 2, +	LANE_COUNT4 = 4 +}; + +enum link_training_state { +	START, +	CLOCK_RECOVERY, +	EQUALIZER_TRAINING, +	FINISHED, +	FAILED +}; + +enum voltage_swing_level { +	VOLTAGE_LEVEL_0, +	VOLTAGE_LEVEL_1, +	VOLTAGE_LEVEL_2, +	VOLTAGE_LEVEL_3, +}; + +enum pre_emphasis_level { +	PRE_EMPHASIS_LEVEL_0, +	PRE_EMPHASIS_LEVEL_1, +	PRE_EMPHASIS_LEVEL_2, +	PRE_EMPHASIS_LEVEL_3, +}; + +enum pattern_set { +	PRBS7, +	D10_2, +	TRAINING_PTN1, +	TRAINING_PTN2, +	DP_NONE +}; + +enum color_space { +	COLOR_RGB, +	COLOR_YCBCR422, +	COLOR_YCBCR444 +}; + +enum color_depth { +	COLOR_6, +	COLOR_8, +	COLOR_10, +	COLOR_12 +}; + +enum color_coefficient { +	COLOR_YCBCR601, +	COLOR_YCBCR709 +}; + +enum dynamic_range { +	VESA, +	CEA +}; + +enum pll_status { +	PLL_UNLOCKED, +	PLL_LOCKED +}; + +enum clock_recovery_m_value_type { +	CALCULATED_M, +	REGISTER_M +}; + +enum video_timing_recognition_type { +	VIDEO_TIMING_FROM_CAPTURE, +	VIDEO_TIMING_FROM_REGISTER +}; + +enum analog_power_block { +	AUX_BLOCK, +	CH0_BLOCK, +	CH1_BLOCK, +	CH2_BLOCK, +	CH3_BLOCK, +	ANALOG_TOTAL, +	POWER_ALL +}; + +enum dp_irq_type { +	DP_IRQ_TYPE_HP_CABLE_IN, +	DP_IRQ_TYPE_HP_CABLE_OUT, +	DP_IRQ_TYPE_HP_CHANGE, +	DP_IRQ_TYPE_UNKNOWN, +}; + +struct video_info { +	char *name; + +	bool h_sync_polarity; +	bool v_sync_polarity; +	bool interlaced; + +	enum 
color_space color_space; +	enum dynamic_range dynamic_range; +	enum color_coefficient ycbcr_coeff; +	enum color_depth color_depth; + +	enum link_rate_type link_rate; +	enum link_lane_count_type lane_count; +}; + +struct link_train { +	int eq_loop; +	int cr_loop[4]; + +	u8 link_rate; +	u8 lane_count; +	u8 training_lane[4]; + +	enum link_training_state lt_state; +}; + +struct exynos_dp_device { +	struct device		*dev; +	struct drm_device	*drm_dev; +	struct drm_connector	connector; +	struct drm_encoder	*encoder; +	struct clk		*clock; +	unsigned int		irq; +	void __iomem		*reg_base; +	void __iomem		*phy_addr; +	unsigned int		enable_mask; + +	struct video_info	*video_info; +	struct link_train	link_train; +	struct work_struct	hotplug_work; +	struct phy		*phy; +	int			dpms_mode; +	int			hpd_gpio; + +	struct exynos_drm_panel_info panel; +}; + +/* exynos_dp_reg.c */ +void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable); +void exynos_dp_stop_video(struct exynos_dp_device *dp); +void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable); +void exynos_dp_init_analog_param(struct exynos_dp_device *dp); +void exynos_dp_init_interrupt(struct exynos_dp_device *dp); +void exynos_dp_reset(struct exynos_dp_device *dp); +void exynos_dp_swreset(struct exynos_dp_device *dp); +void exynos_dp_config_interrupt(struct exynos_dp_device *dp); +enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); +void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); +void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, +				enum analog_power_block block, +				bool enable); +void exynos_dp_init_analog_func(struct exynos_dp_device *dp); +void exynos_dp_init_hpd(struct exynos_dp_device *dp); +enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp); +void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp); +void exynos_dp_reset_aux(struct exynos_dp_device *dp); +void exynos_dp_init_aux(struct 
exynos_dp_device *dp); +int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp); +void exynos_dp_enable_sw_function(struct exynos_dp_device *dp); +int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp); +int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned char data); +int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned char *data); +int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char data[]); +int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char data[]); +int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr); +int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr, +				unsigned int *data); +int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char edid[]); +void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); +void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); +void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); +void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); +void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); +void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, +				 enum pattern_set pattern); +void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level); +void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level); +void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level); +void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level); +void 
exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, +				u32 training_lane); +void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, +				u32 training_lane); +void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, +				u32 training_lane); +void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, +				u32 training_lane); +u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp); +u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp); +u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp); +u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp); +void exynos_dp_reset_macro(struct exynos_dp_device *dp); +void exynos_dp_init_video(struct exynos_dp_device *dp); + +void exynos_dp_set_video_color_format(struct exynos_dp_device *dp); +int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp); +void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, +			enum clock_recovery_m_value_type type, +			u32 m_value, +			u32 n_value); +void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type); +void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable); +void exynos_dp_start_video(struct exynos_dp_device *dp); +int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp); +void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp); +void exynos_dp_enable_scrambling(struct exynos_dp_device *dp); +void exynos_dp_disable_scrambling(struct exynos_dp_device *dp); + +/* I2C EDID Chip ID, Slave Address */ +#define I2C_EDID_DEVICE_ADDR			0x50 +#define I2C_E_EDID_DEVICE_ADDR			0x30 + +#define EDID_BLOCK_LENGTH			0x80 +#define EDID_HEADER_PATTERN			0x00 +#define EDID_EXTENSION_FLAG			0x7e +#define EDID_CHECKSUM				0x7f + +/* DP_MAX_LANE_COUNT */ +#define DPCD_ENHANCED_FRAME_CAP(x)		(((x) >> 7) & 0x1) +#define DPCD_MAX_LANE_COUNT(x)			((x) & 0x1f) + +/* DP_LANE_COUNT_SET */ +#define DPCD_LANE_COUNT_SET(x)			((x) & 0x1f) + +/* 
DP_TRAINING_LANE0_SET */ +#define DPCD_PRE_EMPHASIS_SET(x)		(((x) & 0x3) << 3) +#define DPCD_PRE_EMPHASIS_GET(x)		(((x) >> 3) & 0x3) +#define DPCD_VOLTAGE_SWING_SET(x)		(((x) & 0x3) << 0) +#define DPCD_VOLTAGE_SWING_GET(x)		(((x) >> 0) & 0x3) + +#endif /* _EXYNOS_DP_CORE_H */ diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c new file mode 100644 index 00000000000..c1f87a2a928 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_dp_reg.c @@ -0,0 +1,1263 @@ +/* + * Samsung DP (Display port) register interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/gpio.h> + +#include "exynos_dp_core.h" +#include "exynos_dp_reg.h" + +#define COMMON_INT_MASK_1	0 +#define COMMON_INT_MASK_2	0 +#define COMMON_INT_MASK_3	0 +#define COMMON_INT_MASK_4	(HOTPLUG_CHG | HPD_LOST | PLUG) +#define INT_STA_MASK		INT_HPD + +void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable) +{ +	u32 reg; + +	if (enable) { +		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +		reg |= HDCP_VIDEO_MUTE; +		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +	} else { +		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +		reg &= ~HDCP_VIDEO_MUTE; +		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +	} +} + +void exynos_dp_stop_video(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +	reg &= ~VIDEO_EN; +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +} + +void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable) +{ +	u32 reg; + +	if (enable) +		reg = 
LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | +			LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; +	else +		reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | +			LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; + +	writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); +} + +void exynos_dp_init_analog_param(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = TX_TERMINAL_CTRL_50_OHM; +	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1); + +	reg = SEL_24M | TX_DVDD_BIT_1_0625V; +	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2); + +	reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; +	writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); + +	reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | +		TX_CUR1_2X | TX_CUR_16_MA; +	writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1); + +	reg = CH3_AMP_400_MV | CH2_AMP_400_MV | +		CH1_AMP_400_MV | CH0_AMP_400_MV; +	writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL); +} + +void exynos_dp_init_interrupt(struct exynos_dp_device *dp) +{ +	/* Set interrupt pin assertion polarity as high */ +	writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL); + +	/* Clear pending regisers */ +	writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); +	writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2); +	writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3); +	writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); +	writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA); + +	/* 0:mask,1: unmask */ +	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); +	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); +	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); +	writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); +	writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK); +} + +void exynos_dp_reset(struct exynos_dp_device *dp) +{ +	u32 reg; + +	exynos_dp_stop_video(dp); +	exynos_dp_enable_video_mute(dp, 0); + +	reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | +		AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | +		
HDCP_FUNC_EN_N | SW_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); + +	reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | +		SERDES_FIFO_FUNC_EN_N | +		LS_CLK_DOMAIN_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); + +	usleep_range(20, 30); + +	exynos_dp_lane_swap(dp, 0); + +	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1); +	writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2); +	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3); +	writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4); + +	writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL); +	writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL); + +	writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L); +	writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H); + +	writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL); + +	writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST); + +	writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD); +	writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN); + +	writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH); +	writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH); + +	writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +} + +void exynos_dp_swreset(struct exynos_dp_device *dp) +{ +	writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); +} + +void exynos_dp_config_interrupt(struct exynos_dp_device *dp) +{ +	u32 reg; + +	/* 0: mask, 1: unmask */ +	reg = COMMON_INT_MASK_1; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); + +	reg = COMMON_INT_MASK_2; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); + +	reg = COMMON_INT_MASK_3; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); + +	reg = COMMON_INT_MASK_4; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); + +	reg = INT_STA_MASK; +	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK); +} + +enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); +	if (reg & PLL_LOCK) +		return PLL_LOCKED; +	else +		
return PLL_UNLOCKED; +} + +void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable) +{ +	u32 reg; + +	if (enable) { +		reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); +		reg |= DP_PLL_PD; +		writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); +	} else { +		reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); +		reg &= ~DP_PLL_PD; +		writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); +	} +} + +void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, +				enum analog_power_block block, +				bool enable) +{ +	u32 reg; + +	switch (block) { +	case AUX_BLOCK: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= AUX_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~AUX_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case CH0_BLOCK: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= CH0_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~CH0_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case CH1_BLOCK: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= CH1_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~CH1_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case CH2_BLOCK: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= CH2_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~CH2_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case CH3_BLOCK: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= CH3_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~CH3_PD; +			
writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case ANALOG_TOTAL: +		if (enable) { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg |= DP_PHY_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); +			reg &= ~DP_PHY_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	case POWER_ALL: +		if (enable) { +			reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | +				CH1_PD | CH0_PD; +			writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); +		} else { +			writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD); +		} +		break; +	default: +		break; +	} +} + +void exynos_dp_init_analog_func(struct exynos_dp_device *dp) +{ +	u32 reg; +	int timeout_loop = 0; + +	exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); + +	reg = PLL_LOCK_CHG; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); + +	reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); +	reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); +	writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); + +	/* Power up PLL */ +	if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { +		exynos_dp_set_pll_power_down(dp, 0); + +		while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { +			timeout_loop++; +			if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { +				dev_err(dp->dev, "failed to get pll lock status\n"); +				return; +			} +			usleep_range(10, 20); +		} +	} + +	/* Enable Serdes FIFO function and Link symbol clock domain module */ +	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); +	reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N +		| AUX_FUNC_EN_N); +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); +} + +void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp) +{ +	u32 reg; + +	if (gpio_is_valid(dp->hpd_gpio)) +		return; + +	reg = HOTPLUG_CHG | HPD_LOST | PLUG; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); + +	reg = INT_HPD; +	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); +} + +void exynos_dp_init_hpd(struct exynos_dp_device 
*dp) +{ +	u32 reg; + +	if (gpio_is_valid(dp->hpd_gpio)) +		return; + +	exynos_dp_clear_hotplug_interrupts(dp); + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); +	reg &= ~(F_HPD | HPD_CTRL); +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); +} + +enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp) +{ +	u32 reg; + +	if (gpio_is_valid(dp->hpd_gpio)) { +		reg = gpio_get_value(dp->hpd_gpio); +		if (reg) +			return DP_IRQ_TYPE_HP_CABLE_IN; +		else +			return DP_IRQ_TYPE_HP_CABLE_OUT; +	} else { +		/* Parse hotplug interrupt status register */ +		reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); + +		if (reg & PLUG) +			return DP_IRQ_TYPE_HP_CABLE_IN; + +		if (reg & HPD_LOST) +			return DP_IRQ_TYPE_HP_CABLE_OUT; + +		if (reg & HOTPLUG_CHG) +			return DP_IRQ_TYPE_HP_CHANGE; + +		return DP_IRQ_TYPE_UNKNOWN; +	} +} + +void exynos_dp_reset_aux(struct exynos_dp_device *dp) +{ +	u32 reg; + +	/* Disable AUX channel module */ +	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); +	reg |= AUX_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); +} + +void exynos_dp_init_aux(struct exynos_dp_device *dp) +{ +	u32 reg; + +	/* Clear inerrupts related to AUX channel */ +	reg = RPLY_RECEIV | AUX_ERR; +	writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); + +	exynos_dp_reset_aux(dp); + +	/* Disable AUX transaction H/W retry */ +	reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)| +		AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; +	writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL); + +	/* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ +	reg = DEFER_CTRL_EN | DEFER_COUNT(1); +	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL); + +	/* Enable AUX channel module */ +	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); +	reg &= ~AUX_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); +} + +int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp) +{ +	u32 reg; + +	if (gpio_is_valid(dp->hpd_gpio)) { +		if 
(gpio_get_value(dp->hpd_gpio)) +			return 0; +	} else { +		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); +		if (reg & HPD_STATUS) +			return 0; +	} + +	return -EINVAL; +} + +void exynos_dp_enable_sw_function(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); +	reg &= ~SW_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); +} + +int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp) +{ +	int reg; +	int retval = 0; +	int timeout_loop = 0; + +	/* Enable AUX CH operation */ +	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); +	reg |= AUX_EN; +	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); + +	/* Is AUX CH command reply received? */ +	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); +	while (!(reg & RPLY_RECEIV)) { +		timeout_loop++; +		if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { +			dev_err(dp->dev, "AUX CH command reply failed!\n"); +			return -ETIMEDOUT; +		} +		reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); +		usleep_range(10, 11); +	} + +	/* Clear interrupt source for AUX CH command reply */ +	writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA); + +	/* Clear interrupt source for AUX CH access error */ +	reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); +	if (reg & AUX_ERR) { +		writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA); +		return -EREMOTEIO; +	} + +	/* Check AUX CH error access status */ +	reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA); +	if ((reg & AUX_STATUS_MASK) != 0) { +		dev_err(dp->dev, "AUX CH error happens: %d\n\n", +			reg & AUX_STATUS_MASK); +		return -EREMOTEIO; +	} + +	return retval; +} + +int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned char data) +{ +	u32 reg; +	int i; +	int retval; + +	for (i = 0; i < 3; i++) { +		/* Clear AUX CH data buffer */ +		reg = BUF_CLR; +		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +		/* Select DPCD device address */ +		reg = AUX_ADDR_7_0(reg_addr); +		writel(reg, 
dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); +		reg = AUX_ADDR_15_8(reg_addr); +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); +		reg = AUX_ADDR_19_16(reg_addr); +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); + +		/* Write data buffer */ +		reg = (unsigned int)data; +		writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0); + +		/* +		 * Set DisplayPort transaction and write 1 byte +		 * If bit 3 is 1, DisplayPort transaction. +		 * If Bit 3 is 0, I2C transaction. +		 */ +		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +		/* Start AUX transaction */ +		retval = exynos_dp_start_aux_transaction(dp); +		if (retval == 0) +			break; +		else +			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", +				__func__); +	} + +	return retval; +} + +int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned char *data) +{ +	u32 reg; +	int i; +	int retval; + +	for (i = 0; i < 3; i++) { +		/* Clear AUX CH data buffer */ +		reg = BUF_CLR; +		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +		/* Select DPCD device address */ +		reg = AUX_ADDR_7_0(reg_addr); +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); +		reg = AUX_ADDR_15_8(reg_addr); +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); +		reg = AUX_ADDR_19_16(reg_addr); +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); + +		/* +		 * Set DisplayPort transaction and read 1 byte +		 * If bit 3 is 1, DisplayPort transaction. +		 * If Bit 3 is 0, I2C transaction. 
+		 */ +		reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +		/* Start AUX transaction */ +		retval = exynos_dp_start_aux_transaction(dp); +		if (retval == 0) +			break; +		else +			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", +				__func__); +	} + +	/* Read data buffer */ +	reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); +	*data = (unsigned char)(reg & 0xff); + +	return retval; +} + +int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char data[]) +{ +	u32 reg; +	unsigned int start_offset; +	unsigned int cur_data_count; +	unsigned int cur_data_idx; +	int i; +	int retval = 0; + +	/* Clear AUX CH data buffer */ +	reg = BUF_CLR; +	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +	start_offset = 0; +	while (start_offset < count) { +		/* Buffer size of AUX CH is 16 * 4bytes */ +		if ((count - start_offset) > 16) +			cur_data_count = 16; +		else +			cur_data_count = count - start_offset; + +		for (i = 0; i < 3; i++) { +			/* Select DPCD device address */ +			reg = AUX_ADDR_7_0(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); +			reg = AUX_ADDR_15_8(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); +			reg = AUX_ADDR_19_16(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); + +			for (cur_data_idx = 0; cur_data_idx < cur_data_count; +			     cur_data_idx++) { +				reg = data[start_offset + cur_data_idx]; +				writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0 +							  + 4 * cur_data_idx); +			} + +			/* +			 * Set DisplayPort transaction and write +			 * If bit 3 is 1, DisplayPort transaction. +			 * If Bit 3 is 0, I2C transaction. 
+			 */ +			reg = AUX_LENGTH(cur_data_count) | +				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +			/* Start AUX transaction */ +			retval = exynos_dp_start_aux_transaction(dp); +			if (retval == 0) +				break; +			else +				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", +					__func__); +		} + +		start_offset += cur_data_count; +	} + +	return retval; +} + +int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char data[]) +{ +	u32 reg; +	unsigned int start_offset; +	unsigned int cur_data_count; +	unsigned int cur_data_idx; +	int i; +	int retval = 0; + +	/* Clear AUX CH data buffer */ +	reg = BUF_CLR; +	writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +	start_offset = 0; +	while (start_offset < count) { +		/* Buffer size of AUX CH is 16 * 4bytes */ +		if ((count - start_offset) > 16) +			cur_data_count = 16; +		else +			cur_data_count = count - start_offset; + +		/* AUX CH Request Transaction process */ +		for (i = 0; i < 3; i++) { +			/* Select DPCD device address */ +			reg = AUX_ADDR_7_0(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); +			reg = AUX_ADDR_15_8(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); +			reg = AUX_ADDR_19_16(reg_addr + start_offset); +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); + +			/* +			 * Set DisplayPort transaction and read +			 * If bit 3 is 1, DisplayPort transaction. +			 * If Bit 3 is 0, I2C transaction. 
+			 */ +			reg = AUX_LENGTH(cur_data_count) | +				AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +			/* Start AUX transaction */ +			retval = exynos_dp_start_aux_transaction(dp); +			if (retval == 0) +				break; +			else +				dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", +					__func__); +		} + +		for (cur_data_idx = 0; cur_data_idx < cur_data_count; +		    cur_data_idx++) { +			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 +						 + 4 * cur_data_idx); +			data[start_offset + cur_data_idx] = +				(unsigned char)reg; +		} + +		start_offset += cur_data_count; +	} + +	return retval; +} + +int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr) +{ +	u32 reg; +	int retval; + +	/* Set EDID device address */ +	reg = device_addr; +	writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); +	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); +	writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); + +	/* Set offset from base address of EDID device */ +	writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0); + +	/* +	 * Set I2C transaction and write address +	 * If bit 3 is 1, DisplayPort transaction. +	 * If Bit 3 is 0, I2C transaction. 
+	 */ +	reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT | +		AUX_TX_COMM_WRITE; +	writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +	/* Start AUX transaction */ +	retval = exynos_dp_start_aux_transaction(dp); +	if (retval != 0) +		dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); + +	return retval; +} + +int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr, +				unsigned int *data) +{ +	u32 reg; +	int i; +	int retval; + +	for (i = 0; i < 3; i++) { +		/* Clear AUX CH data buffer */ +		reg = BUF_CLR; +		writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +		/* Select EDID device */ +		retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr); +		if (retval != 0) +			continue; + +		/* +		 * Set I2C transaction and read data +		 * If bit 3 is 1, DisplayPort transaction. +		 * If Bit 3 is 0, I2C transaction. +		 */ +		reg = AUX_TX_COMM_I2C_TRANSACTION | +			AUX_TX_COMM_READ; +		writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); + +		/* Start AUX transaction */ +		retval = exynos_dp_start_aux_transaction(dp); +		if (retval == 0) +			break; +		else +			dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", +				__func__); +	} + +	/* Read data */ +	if (retval == 0) +		*data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); + +	return retval; +} + +int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, +				unsigned int device_addr, +				unsigned int reg_addr, +				unsigned int count, +				unsigned char edid[]) +{ +	u32 reg; +	unsigned int i, j; +	unsigned int cur_data_idx; +	unsigned int defer = 0; +	int retval = 0; + +	for (i = 0; i < count; i += 16) { +		for (j = 0; j < 3; j++) { +			/* Clear AUX CH data buffer */ +			reg = BUF_CLR; +			writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); + +			/* Set normal AUX CH command */ +			reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); +			reg &= ~ADDR_ONLY; +			writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); + +			/* +			 * If 
Rx sends defer, Tx sends only reads +			 * request without sending address +			 */ +			if (!defer) +				retval = exynos_dp_select_i2c_device(dp, +						device_addr, reg_addr + i); +			else +				defer = 0; + +			if (retval == 0) { +				/* +				 * Set I2C transaction and write data +				 * If bit 3 is 1, DisplayPort transaction. +				 * If Bit 3 is 0, I2C transaction. +				 */ +				reg = AUX_LENGTH(16) | +					AUX_TX_COMM_I2C_TRANSACTION | +					AUX_TX_COMM_READ; +				writel(reg, dp->reg_base + +					EXYNOS_DP_AUX_CH_CTL_1); + +				/* Start AUX transaction */ +				retval = exynos_dp_start_aux_transaction(dp); +				if (retval == 0) +					break; +				else +					dev_dbg(dp->dev, +						"%s: Aux Transaction fail!\n", +						__func__); +			} +			/* Check if Rx sends defer */ +			reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM); +			if (reg == AUX_RX_COMM_AUX_DEFER || +				reg == AUX_RX_COMM_I2C_DEFER) { +				dev_err(dp->dev, "Defer: %d\n\n", reg); +				defer = 1; +			} +		} + +		for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) { +			reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 +						 + 4 * cur_data_idx); +			edid[i + cur_data_idx] = (unsigned char)reg; +		} +	} + +	return retval; +} + +void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype) +{ +	u32 reg; + +	reg = bwtype; +	if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS)) +		writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET); +} + +void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET); +	*bwtype = reg; +} + +void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count) +{ +	u32 reg; + +	reg = count; +	writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); +} + +void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); +	*count = reg; +} + +void exynos_dp_enable_enhanced_mode(struct 
exynos_dp_device *dp, bool enable) +{ +	u32 reg; + +	if (enable) { +		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); +		reg |= ENHANCED; +		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); +	} else { +		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); +		reg &= ~ENHANCED; +		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); +	} +} + +void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, +				 enum pattern_set pattern) +{ +	u32 reg; + +	switch (pattern) { +	case PRBS7: +		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; +		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +		break; +	case D10_2: +		reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; +		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +		break; +	case TRAINING_PTN1: +		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; +		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +		break; +	case TRAINING_PTN2: +		reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; +		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +		break; +	case DP_NONE: +		reg = SCRAMBLING_ENABLE | +			LINK_QUAL_PATTERN_SET_DISABLE | +			SW_TRAINING_PATTERN_SET_NORMAL; +		writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +		break; +	default: +		break; +	} +} + +void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); +	reg &= ~PRE_EMPHASIS_SET_MASK; +	reg |= level << PRE_EMPHASIS_SET_SHIFT; +	writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); +	reg &= ~PRE_EMPHASIS_SET_MASK; +	reg |= level << PRE_EMPHASIS_SET_SHIFT; +	writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level) +{ +	u32 reg; + +	reg = readl(dp->reg_base + 
EXYNOS_DP_LN2_LINK_TRAINING_CTL); +	reg &= ~PRE_EMPHASIS_SET_MASK; +	reg |= level << PRE_EMPHASIS_SET_SHIFT; +	writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); +	reg &= ~PRE_EMPHASIS_SET_MASK; +	reg |= level << PRE_EMPHASIS_SET_SHIFT; +	writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, +					u32 training_lane) +{ +	u32 reg; + +	reg = training_lane; +	writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, +					u32 training_lane) +{ +	u32 reg; + +	reg = training_lane; +	writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, +					u32 training_lane) +{ +	u32 reg; + +	reg = training_lane; +	writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); +} + +void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, +					u32 training_lane) +{ +	u32 reg; + +	reg = training_lane; +	writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); +} + +u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); +	return reg; +} + +u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); +	return reg; +} + +u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); +	return reg; +} + +u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); +	return reg; +} + +void exynos_dp_reset_macro(struct exynos_dp_device 
*dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST); +	reg |= MACRO_RST; +	writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); + +	/* 10 us is the minimum reset time. */ +	usleep_range(10, 20); + +	reg &= ~MACRO_RST; +	writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); +} + +void exynos_dp_init_video(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; +	writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); + +	reg = 0x0; +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); + +	reg = CHA_CRI(4) | CHA_CTRL; +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); + +	reg = 0x0; +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); + +	reg = VID_HRES_TH(2) | VID_VRES_TH(0); +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8); +} + +void exynos_dp_set_video_color_format(struct exynos_dp_device *dp) +{ +	u32 reg; + +	/* Configure the input color depth, color space, dynamic range */ +	reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) | +		(dp->video_info->color_depth << IN_BPC_SHIFT) | +		(dp->video_info->color_space << IN_COLOR_F_SHIFT); +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2); + +	/* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); +	reg &= ~IN_YC_COEFFI_MASK; +	if (dp->video_info->ycbcr_coeff) +		reg |= IN_YC_COEFFI_ITU709; +	else +		reg |= IN_YC_COEFFI_ITU601; +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); +} + +int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); + +	if (!(reg & DET_STA)) { +		dev_dbg(dp->dev, "Input stream clock not detected.\n"); +		return -EINVAL; +	} + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); +	dev_dbg(dp->dev, 
"wait SYS_CTL_2.\n"); + +	if (reg & CHA_STA) { +		dev_dbg(dp->dev, "Input stream clk is changing\n"); +		return -EINVAL; +	} + +	return 0; +} + +void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, +		enum clock_recovery_m_value_type type, +		u32 m_value, +		u32 n_value) +{ +	u32 reg; + +	if (type == REGISTER_M) { +		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); +		reg |= FIX_M_VID; +		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); +		reg = m_value & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0); +		reg = (m_value >> 8) & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1); +		reg = (m_value >> 16) & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2); + +		reg = n_value & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0); +		reg = (n_value >> 8) & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1); +		reg = (n_value >> 16) & 0xff; +		writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2); +	} else  { +		reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); +		reg &= ~FIX_M_VID; +		writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); + +		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0); +		writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1); +		writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2); +	} +} + +void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type) +{ +	u32 reg; + +	if (type == VIDEO_TIMING_FROM_CAPTURE) { +		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +		reg &= ~FORMAT_SEL; +		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +	} else { +		reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +		reg |= FORMAT_SEL; +		writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +	} +} + +void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable) +{ +	u32 reg; + +	if (enable) { +		reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +		reg &= ~VIDEO_MODE_MASK; +		reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; +		writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +	} else { +		reg = 
readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +		reg &= ~VIDEO_MODE_MASK; +		reg |= VIDEO_MODE_SLAVE_MODE; +		writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +	} +} + +void exynos_dp_start_video(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +	reg |= VIDEO_EN; +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); +} + +int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); +	writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); + +	reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); +	if (!(reg & STRM_VALID)) { +		dev_dbg(dp->dev, "Input video stream is not detected.\n"); +		return -EINVAL; +	} + +	return 0; +} + +void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); +	reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N); +	reg |= MASTER_VID_FUNC_EN_N; +	writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); + +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +	reg &= ~INTERACE_SCAN_CFG; +	reg |= (dp->video_info->interlaced << 2); +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); + +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +	reg &= ~VSYNC_POLARITY_CFG; +	reg |= (dp->video_info->v_sync_polarity << 1); +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); + +	reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); +	reg &= ~HSYNC_POLARITY_CFG; +	reg |= (dp->video_info->h_sync_polarity << 0); +	writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); + +	reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; +	writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); +} + +void exynos_dp_enable_scrambling(struct exynos_dp_device *dp) +{ +	u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +	reg &= ~SCRAMBLING_DISABLE; +	writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +} + +void exynos_dp_disable_scrambling(struct exynos_dp_device *dp) +{ +	
u32 reg; + +	reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +	reg |= SCRAMBLING_DISABLE; +	writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); +} diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.h b/drivers/gpu/drm/exynos/exynos_dp_reg.h new file mode 100644 index 00000000000..2e9bd0e0b9f --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_dp_reg.h @@ -0,0 +1,366 @@ +/* + * Register definition file for Samsung DP driver + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _EXYNOS_DP_REG_H +#define _EXYNOS_DP_REG_H + +#define EXYNOS_DP_TX_SW_RESET			0x14 +#define EXYNOS_DP_FUNC_EN_1			0x18 +#define EXYNOS_DP_FUNC_EN_2			0x1C +#define EXYNOS_DP_VIDEO_CTL_1			0x20 +#define EXYNOS_DP_VIDEO_CTL_2			0x24 +#define EXYNOS_DP_VIDEO_CTL_3			0x28 + +#define EXYNOS_DP_VIDEO_CTL_8			0x3C +#define EXYNOS_DP_VIDEO_CTL_10			0x44 + +#define EXYNOS_DP_LANE_MAP			0x35C + +#define EXYNOS_DP_ANALOG_CTL_1			0x370 +#define EXYNOS_DP_ANALOG_CTL_2			0x374 +#define EXYNOS_DP_ANALOG_CTL_3			0x378 +#define EXYNOS_DP_PLL_FILTER_CTL_1		0x37C +#define EXYNOS_DP_TX_AMP_TUNING_CTL		0x380 + +#define EXYNOS_DP_AUX_HW_RETRY_CTL		0x390 + +#define EXYNOS_DP_COMMON_INT_STA_1		0x3C4 +#define EXYNOS_DP_COMMON_INT_STA_2		0x3C8 +#define EXYNOS_DP_COMMON_INT_STA_3		0x3CC +#define EXYNOS_DP_COMMON_INT_STA_4		0x3D0 +#define EXYNOS_DP_INT_STA			0x3DC +#define EXYNOS_DP_COMMON_INT_MASK_1		0x3E0 +#define EXYNOS_DP_COMMON_INT_MASK_2		0x3E4 +#define EXYNOS_DP_COMMON_INT_MASK_3		0x3E8 +#define EXYNOS_DP_COMMON_INT_MASK_4		0x3EC +#define EXYNOS_DP_INT_STA_MASK			0x3F8 +#define EXYNOS_DP_INT_CTL			0x3FC + +#define EXYNOS_DP_SYS_CTL_1			0x600 +#define EXYNOS_DP_SYS_CTL_2			0x604 +#define EXYNOS_DP_SYS_CTL_3			0x608 +#define EXYNOS_DP_SYS_CTL_4			0x60C + 
+#define EXYNOS_DP_PKT_SEND_CTL			0x640 +#define EXYNOS_DP_HDCP_CTL			0x648 + +#define EXYNOS_DP_LINK_BW_SET			0x680 +#define EXYNOS_DP_LANE_COUNT_SET		0x684 +#define EXYNOS_DP_TRAINING_PTN_SET		0x688 +#define EXYNOS_DP_LN0_LINK_TRAINING_CTL		0x68C +#define EXYNOS_DP_LN1_LINK_TRAINING_CTL		0x690 +#define EXYNOS_DP_LN2_LINK_TRAINING_CTL		0x694 +#define EXYNOS_DP_LN3_LINK_TRAINING_CTL		0x698 + +#define EXYNOS_DP_DEBUG_CTL			0x6C0 +#define EXYNOS_DP_HPD_DEGLITCH_L		0x6C4 +#define EXYNOS_DP_HPD_DEGLITCH_H		0x6C8 +#define EXYNOS_DP_LINK_DEBUG_CTL		0x6E0 + +#define EXYNOS_DP_M_VID_0			0x700 +#define EXYNOS_DP_M_VID_1			0x704 +#define EXYNOS_DP_M_VID_2			0x708 +#define EXYNOS_DP_N_VID_0			0x70C +#define EXYNOS_DP_N_VID_1			0x710 +#define EXYNOS_DP_N_VID_2			0x714 + +#define EXYNOS_DP_PLL_CTL			0x71C +#define EXYNOS_DP_PHY_PD			0x720 +#define EXYNOS_DP_PHY_TEST			0x724 + +#define EXYNOS_DP_VIDEO_FIFO_THRD		0x730 +#define EXYNOS_DP_AUDIO_MARGIN			0x73C + +#define EXYNOS_DP_M_VID_GEN_FILTER_TH		0x764 +#define EXYNOS_DP_M_AUD_GEN_FILTER_TH		0x778 +#define EXYNOS_DP_AUX_CH_STA			0x780 +#define EXYNOS_DP_AUX_CH_DEFER_CTL		0x788 +#define EXYNOS_DP_AUX_RX_COMM			0x78C +#define EXYNOS_DP_BUFFER_DATA_CTL		0x790 +#define EXYNOS_DP_AUX_CH_CTL_1			0x794 +#define EXYNOS_DP_AUX_ADDR_7_0			0x798 +#define EXYNOS_DP_AUX_ADDR_15_8			0x79C +#define EXYNOS_DP_AUX_ADDR_19_16		0x7A0 +#define EXYNOS_DP_AUX_CH_CTL_2			0x7A4 + +#define EXYNOS_DP_BUF_DATA_0			0x7C0 + +#define EXYNOS_DP_SOC_GENERAL_CTL		0x800 + +/* EXYNOS_DP_TX_SW_RESET */ +#define RESET_DP_TX				(0x1 << 0) + +/* EXYNOS_DP_FUNC_EN_1 */ +#define MASTER_VID_FUNC_EN_N			(0x1 << 7) +#define SLAVE_VID_FUNC_EN_N			(0x1 << 5) +#define AUD_FIFO_FUNC_EN_N			(0x1 << 4) +#define AUD_FUNC_EN_N				(0x1 << 3) +#define HDCP_FUNC_EN_N				(0x1 << 2) +#define CRC_FUNC_EN_N				(0x1 << 1) +#define SW_FUNC_EN_N				(0x1 << 0) + +/* EXYNOS_DP_FUNC_EN_2 */ +#define SSC_FUNC_EN_N				(0x1 << 7) +#define AUX_FUNC_EN_N				(0x1 << 2) +#define 
SERDES_FIFO_FUNC_EN_N			(0x1 << 1) +#define LS_CLK_DOMAIN_FUNC_EN_N			(0x1 << 0) + +/* EXYNOS_DP_VIDEO_CTL_1 */ +#define VIDEO_EN				(0x1 << 7) +#define HDCP_VIDEO_MUTE				(0x1 << 6) + +/* EXYNOS_DP_VIDEO_CTL_1 */ +#define IN_D_RANGE_MASK				(0x1 << 7) +#define IN_D_RANGE_SHIFT			(7) +#define IN_D_RANGE_CEA				(0x1 << 7) +#define IN_D_RANGE_VESA				(0x0 << 7) +#define IN_BPC_MASK				(0x7 << 4) +#define IN_BPC_SHIFT				(4) +#define IN_BPC_12_BITS				(0x3 << 4) +#define IN_BPC_10_BITS				(0x2 << 4) +#define IN_BPC_8_BITS				(0x1 << 4) +#define IN_BPC_6_BITS				(0x0 << 4) +#define IN_COLOR_F_MASK				(0x3 << 0) +#define IN_COLOR_F_SHIFT			(0) +#define IN_COLOR_F_YCBCR444			(0x2 << 0) +#define IN_COLOR_F_YCBCR422			(0x1 << 0) +#define IN_COLOR_F_RGB				(0x0 << 0) + +/* EXYNOS_DP_VIDEO_CTL_3 */ +#define IN_YC_COEFFI_MASK			(0x1 << 7) +#define IN_YC_COEFFI_SHIFT			(7) +#define IN_YC_COEFFI_ITU709			(0x1 << 7) +#define IN_YC_COEFFI_ITU601			(0x0 << 7) +#define VID_CHK_UPDATE_TYPE_MASK		(0x1 << 4) +#define VID_CHK_UPDATE_TYPE_SHIFT		(4) +#define VID_CHK_UPDATE_TYPE_1			(0x1 << 4) +#define VID_CHK_UPDATE_TYPE_0			(0x0 << 4) + +/* EXYNOS_DP_VIDEO_CTL_8 */ +#define VID_HRES_TH(x)				(((x) & 0xf) << 4) +#define VID_VRES_TH(x)				(((x) & 0xf) << 0) + +/* EXYNOS_DP_VIDEO_CTL_10 */ +#define FORMAT_SEL				(0x1 << 4) +#define INTERACE_SCAN_CFG			(0x1 << 2) +#define VSYNC_POLARITY_CFG			(0x1 << 1) +#define HSYNC_POLARITY_CFG			(0x1 << 0) + +/* EXYNOS_DP_LANE_MAP */ +#define LANE3_MAP_LOGIC_LANE_0			(0x0 << 6) +#define LANE3_MAP_LOGIC_LANE_1			(0x1 << 6) +#define LANE3_MAP_LOGIC_LANE_2			(0x2 << 6) +#define LANE3_MAP_LOGIC_LANE_3			(0x3 << 6) +#define LANE2_MAP_LOGIC_LANE_0			(0x0 << 4) +#define LANE2_MAP_LOGIC_LANE_1			(0x1 << 4) +#define LANE2_MAP_LOGIC_LANE_2			(0x2 << 4) +#define LANE2_MAP_LOGIC_LANE_3			(0x3 << 4) +#define LANE1_MAP_LOGIC_LANE_0			(0x0 << 2) +#define LANE1_MAP_LOGIC_LANE_1			(0x1 << 2) +#define LANE1_MAP_LOGIC_LANE_2			(0x2 << 2) +#define LANE1_MAP_LOGIC_LANE_3			
(0x3 << 2) +#define LANE0_MAP_LOGIC_LANE_0			(0x0 << 0) +#define LANE0_MAP_LOGIC_LANE_1			(0x1 << 0) +#define LANE0_MAP_LOGIC_LANE_2			(0x2 << 0) +#define LANE0_MAP_LOGIC_LANE_3			(0x3 << 0) + +/* EXYNOS_DP_ANALOG_CTL_1 */ +#define TX_TERMINAL_CTRL_50_OHM			(0x1 << 4) + +/* EXYNOS_DP_ANALOG_CTL_2 */ +#define SEL_24M					(0x1 << 3) +#define TX_DVDD_BIT_1_0625V			(0x4 << 0) + +/* EXYNOS_DP_ANALOG_CTL_3 */ +#define DRIVE_DVDD_BIT_1_0625V			(0x4 << 5) +#define VCO_BIT_600_MICRO			(0x5 << 0) + +/* EXYNOS_DP_PLL_FILTER_CTL_1 */ +#define PD_RING_OSC				(0x1 << 6) +#define AUX_TERMINAL_CTRL_50_OHM		(0x2 << 4) +#define TX_CUR1_2X				(0x1 << 2) +#define TX_CUR_16_MA				(0x3 << 0) + +/* EXYNOS_DP_TX_AMP_TUNING_CTL */ +#define CH3_AMP_400_MV				(0x0 << 24) +#define CH2_AMP_400_MV				(0x0 << 16) +#define CH1_AMP_400_MV				(0x0 << 8) +#define CH0_AMP_400_MV				(0x0 << 0) + +/* EXYNOS_DP_AUX_HW_RETRY_CTL */ +#define AUX_BIT_PERIOD_EXPECTED_DELAY(x)	(((x) & 0x7) << 8) +#define AUX_HW_RETRY_INTERVAL_MASK		(0x3 << 3) +#define AUX_HW_RETRY_INTERVAL_600_MICROSECONDS	(0x0 << 3) +#define AUX_HW_RETRY_INTERVAL_800_MICROSECONDS	(0x1 << 3) +#define AUX_HW_RETRY_INTERVAL_1000_MICROSECONDS	(0x2 << 3) +#define AUX_HW_RETRY_INTERVAL_1800_MICROSECONDS	(0x3 << 3) +#define AUX_HW_RETRY_COUNT_SEL(x)		(((x) & 0x7) << 0) + +/* EXYNOS_DP_COMMON_INT_STA_1 */ +#define VSYNC_DET				(0x1 << 7) +#define PLL_LOCK_CHG				(0x1 << 6) +#define SPDIF_ERR				(0x1 << 5) +#define SPDIF_UNSTBL				(0x1 << 4) +#define VID_FORMAT_CHG				(0x1 << 3) +#define AUD_CLK_CHG				(0x1 << 2) +#define VID_CLK_CHG				(0x1 << 1) +#define SW_INT					(0x1 << 0) + +/* EXYNOS_DP_COMMON_INT_STA_2 */ +#define ENC_EN_CHG				(0x1 << 6) +#define HW_BKSV_RDY				(0x1 << 3) +#define HW_SHA_DONE				(0x1 << 2) +#define HW_AUTH_STATE_CHG			(0x1 << 1) +#define HW_AUTH_DONE				(0x1 << 0) + +/* EXYNOS_DP_COMMON_INT_STA_3 */ +#define AFIFO_UNDER				(0x1 << 7) +#define AFIFO_OVER				(0x1 << 6) +#define R0_CHK_FLAG				(0x1 << 5) + +/* 
EXYNOS_DP_COMMON_INT_STA_4 */ +#define PSR_ACTIVE				(0x1 << 7) +#define PSR_INACTIVE				(0x1 << 6) +#define SPDIF_BI_PHASE_ERR			(0x1 << 5) +#define HOTPLUG_CHG				(0x1 << 2) +#define HPD_LOST				(0x1 << 1) +#define PLUG					(0x1 << 0) + +/* EXYNOS_DP_INT_STA */ +#define INT_HPD					(0x1 << 6) +#define HW_TRAINING_FINISH			(0x1 << 5) +#define RPLY_RECEIV				(0x1 << 1) +#define AUX_ERR					(0x1 << 0) + +/* EXYNOS_DP_INT_CTL */ +#define SOFT_INT_CTRL				(0x1 << 2) +#define INT_POL1				(0x1 << 1) +#define INT_POL0				(0x1 << 0) + +/* EXYNOS_DP_SYS_CTL_1 */ +#define DET_STA					(0x1 << 2) +#define FORCE_DET				(0x1 << 1) +#define DET_CTRL				(0x1 << 0) + +/* EXYNOS_DP_SYS_CTL_2 */ +#define CHA_CRI(x)				(((x) & 0xf) << 4) +#define CHA_STA					(0x1 << 2) +#define FORCE_CHA				(0x1 << 1) +#define CHA_CTRL				(0x1 << 0) + +/* EXYNOS_DP_SYS_CTL_3 */ +#define HPD_STATUS				(0x1 << 6) +#define F_HPD					(0x1 << 5) +#define HPD_CTRL				(0x1 << 4) +#define HDCP_RDY				(0x1 << 3) +#define STRM_VALID				(0x1 << 2) +#define F_VALID					(0x1 << 1) +#define VALID_CTRL				(0x1 << 0) + +/* EXYNOS_DP_SYS_CTL_4 */ +#define FIX_M_AUD				(0x1 << 4) +#define ENHANCED				(0x1 << 3) +#define FIX_M_VID				(0x1 << 2) +#define M_VID_UPDATE_CTRL			(0x3 << 0) + +/* EXYNOS_DP_TRAINING_PTN_SET */ +#define SCRAMBLER_TYPE				(0x1 << 9) +#define HW_LINK_TRAINING_PATTERN		(0x1 << 8) +#define SCRAMBLING_DISABLE			(0x1 << 5) +#define SCRAMBLING_ENABLE			(0x0 << 5) +#define LINK_QUAL_PATTERN_SET_MASK		(0x3 << 2) +#define LINK_QUAL_PATTERN_SET_PRBS7		(0x3 << 2) +#define LINK_QUAL_PATTERN_SET_D10_2		(0x1 << 2) +#define LINK_QUAL_PATTERN_SET_DISABLE		(0x0 << 2) +#define SW_TRAINING_PATTERN_SET_MASK		(0x3 << 0) +#define SW_TRAINING_PATTERN_SET_PTN2		(0x2 << 0) +#define SW_TRAINING_PATTERN_SET_PTN1		(0x1 << 0) +#define SW_TRAINING_PATTERN_SET_NORMAL		(0x0 << 0) + +/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */ +#define PRE_EMPHASIS_SET_MASK			(0x3 << 3) +#define PRE_EMPHASIS_SET_SHIFT			(3) + +/* EXYNOS_DP_DEBUG_CTL 
*/ +#define PLL_LOCK				(0x1 << 4) +#define F_PLL_LOCK				(0x1 << 3) +#define PLL_LOCK_CTRL				(0x1 << 2) +#define PN_INV					(0x1 << 0) + +/* EXYNOS_DP_PLL_CTL */ +#define DP_PLL_PD				(0x1 << 7) +#define DP_PLL_RESET				(0x1 << 6) +#define DP_PLL_LOOP_BIT_DEFAULT			(0x1 << 4) +#define DP_PLL_REF_BIT_1_1250V			(0x5 << 0) +#define DP_PLL_REF_BIT_1_2500V			(0x7 << 0) + +/* EXYNOS_DP_PHY_PD */ +#define DP_PHY_PD				(0x1 << 5) +#define AUX_PD					(0x1 << 4) +#define CH3_PD					(0x1 << 3) +#define CH2_PD					(0x1 << 2) +#define CH1_PD					(0x1 << 1) +#define CH0_PD					(0x1 << 0) + +/* EXYNOS_DP_PHY_TEST */ +#define MACRO_RST				(0x1 << 5) +#define CH1_TEST				(0x1 << 1) +#define CH0_TEST				(0x1 << 0) + +/* EXYNOS_DP_AUX_CH_STA */ +#define AUX_BUSY				(0x1 << 4) +#define AUX_STATUS_MASK				(0xf << 0) + +/* EXYNOS_DP_AUX_CH_DEFER_CTL */ +#define DEFER_CTRL_EN				(0x1 << 7) +#define DEFER_COUNT(x)				(((x) & 0x7f) << 0) + +/* EXYNOS_DP_AUX_RX_COMM */ +#define AUX_RX_COMM_I2C_DEFER			(0x2 << 2) +#define AUX_RX_COMM_AUX_DEFER			(0x2 << 0) + +/* EXYNOS_DP_BUFFER_DATA_CTL */ +#define BUF_CLR					(0x1 << 7) +#define BUF_DATA_COUNT(x)			(((x) & 0x1f) << 0) + +/* EXYNOS_DP_AUX_CH_CTL_1 */ +#define AUX_LENGTH(x)				(((x - 1) & 0xf) << 4) +#define AUX_TX_COMM_MASK			(0xf << 0) +#define AUX_TX_COMM_DP_TRANSACTION		(0x1 << 3) +#define AUX_TX_COMM_I2C_TRANSACTION		(0x0 << 3) +#define AUX_TX_COMM_MOT				(0x1 << 2) +#define AUX_TX_COMM_WRITE			(0x0 << 0) +#define AUX_TX_COMM_READ			(0x1 << 0) + +/* EXYNOS_DP_AUX_ADDR_7_0 */ +#define AUX_ADDR_7_0(x)				(((x) >> 0) & 0xff) + +/* EXYNOS_DP_AUX_ADDR_15_8 */ +#define AUX_ADDR_15_8(x)			(((x) >> 8) & 0xff) + +/* EXYNOS_DP_AUX_ADDR_19_16 */ +#define AUX_ADDR_19_16(x)			(((x) >> 16) & 0x0f) + +/* EXYNOS_DP_AUX_CH_CTL_2 */ +#define ADDR_ONLY				(0x1 << 1) +#define AUX_EN					(0x1 << 0) + +/* EXYNOS_DP_SOC_GENERAL_CTL */ +#define AUDIO_MODE_SPDIF_MODE			(0x1 << 8) +#define AUDIO_MODE_MASTER_MODE			(0x0 << 8) +#define MASTER_VIDEO_INTERLACE_EN		
(0x1 << 4) +#define VIDEO_MASTER_CLK_SEL			(0x1 << 2) +#define VIDEO_MASTER_MODE_EN			(0x1 << 1) +#define VIDEO_MODE_MASK				(0x1 << 0) +#define VIDEO_MODE_SLAVE_MODE			(0x1 << 0) +#define VIDEO_MODE_MASTER_MODE			(0x0 << 0) + +#endif /* _EXYNOS_DP_REG_H */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c new file mode 100644 index 00000000000..9c8088462c2 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -0,0 +1,186 @@ +/* exynos_drm_buf.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_buf.h" +#include "exynos_drm_iommu.h" + +static int lowlevel_buffer_allocate(struct drm_device *dev, +		unsigned int flags, struct exynos_drm_gem_buf *buf) +{ +	int ret = 0; +	enum dma_attr attr; +	unsigned int nr_pages; + +	if (buf->dma_addr) { +		DRM_DEBUG_KMS("already allocated.\n"); +		return 0; +	} + +	init_dma_attrs(&buf->dma_attrs); + +	/* +	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory +	 * region will be allocated else physically contiguous +	 * as possible. +	 */ +	if (!(flags & EXYNOS_BO_NONCONTIG)) +		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs); + +	/* +	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping +	 * else cachable mapping. 
+	 */ +	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE)) +		attr = DMA_ATTR_WRITE_COMBINE; +	else +		attr = DMA_ATTR_NON_CONSISTENT; + +	dma_set_attr(attr, &buf->dma_attrs); +	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs); + +	nr_pages = buf->size >> PAGE_SHIFT; + +	if (!is_drm_iommu_supported(dev)) { +		dma_addr_t start_addr; +		unsigned int i = 0; + +		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); +		if (!buf->pages) { +			DRM_ERROR("failed to allocate pages.\n"); +			return -ENOMEM; +		} + +		buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev, +					buf->size, +					&buf->dma_addr, GFP_KERNEL, +					&buf->dma_attrs); +		if (!buf->kvaddr) { +			DRM_ERROR("failed to allocate buffer.\n"); +			ret = -ENOMEM; +			goto err_free; +		} + +		start_addr = buf->dma_addr; +		while (i < nr_pages) { +			buf->pages[i] = phys_to_page(start_addr); +			start_addr += PAGE_SIZE; +			i++; +		} +	} else { + +		buf->pages = dma_alloc_attrs(dev->dev, buf->size, +					&buf->dma_addr, GFP_KERNEL, +					&buf->dma_attrs); +		if (!buf->pages) { +			DRM_ERROR("failed to allocate buffer.\n"); +			return -ENOMEM; +		} +	} + +	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); +	if (IS_ERR(buf->sgt)) { +		DRM_ERROR("failed to get sg table.\n"); +		ret = PTR_ERR(buf->sgt); +		goto err_free_attrs; +	} + +	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", +			(unsigned long)buf->dma_addr, +			buf->size); + +	return ret; + +err_free_attrs: +	dma_free_attrs(dev->dev, buf->size, buf->pages, +			(dma_addr_t)buf->dma_addr, &buf->dma_attrs); +	buf->dma_addr = (dma_addr_t)NULL; +err_free: +	if (!is_drm_iommu_supported(dev)) +		drm_free_large(buf->pages); + +	return ret; +} + +static void lowlevel_buffer_deallocate(struct drm_device *dev, +		unsigned int flags, struct exynos_drm_gem_buf *buf) +{ +	if (!buf->dma_addr) { +		DRM_DEBUG_KMS("dma_addr is invalid.\n"); +		return; +	} + +	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", +			(unsigned 
long)buf->dma_addr, +			buf->size); + +	sg_free_table(buf->sgt); + +	kfree(buf->sgt); +	buf->sgt = NULL; + +	if (!is_drm_iommu_supported(dev)) { +		dma_free_attrs(dev->dev, buf->size, buf->kvaddr, +				(dma_addr_t)buf->dma_addr, &buf->dma_attrs); +		drm_free_large(buf->pages); +	} else +		dma_free_attrs(dev->dev, buf->size, buf->pages, +				(dma_addr_t)buf->dma_addr, &buf->dma_attrs); + +	buf->dma_addr = (dma_addr_t)NULL; +} + +struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, +						unsigned int size) +{ +	struct exynos_drm_gem_buf *buffer; + +	DRM_DEBUG_KMS("desired size = 0x%x\n", size); + +	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); +	if (!buffer) +		return NULL; + +	buffer->size = size; +	return buffer; +} + +void exynos_drm_fini_buf(struct drm_device *dev, +				struct exynos_drm_gem_buf *buffer) +{ +	kfree(buffer); +	buffer = NULL; +} + +int exynos_drm_alloc_buf(struct drm_device *dev, +		struct exynos_drm_gem_buf *buf, unsigned int flags) +{ + +	/* +	 * allocate memory region and set the memory information +	 * to vaddr and dma_addr of a buffer object. +	 */ +	if (lowlevel_buffer_allocate(dev, flags, buf) < 0) +		return -ENOMEM; + +	return 0; +} + +void exynos_drm_free_buf(struct drm_device *dev, +		unsigned int flags, struct exynos_drm_gem_buf *buffer) +{ + +	lowlevel_buffer_deallocate(dev, flags, buffer); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h new file mode 100644 index 00000000000..a6412f19673 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h @@ -0,0 +1,33 @@ +/* exynos_drm_buf.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef _EXYNOS_DRM_BUF_H_ +#define _EXYNOS_DRM_BUF_H_ + +/* create and initialize buffer object. */ +struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev, +						unsigned int size); + +/* destroy buffer object. */ +void exynos_drm_fini_buf(struct drm_device *dev, +				struct exynos_drm_gem_buf *buffer); + +/* allocate physical memory region and setup sgt. */ +int exynos_drm_alloc_buf(struct drm_device *dev, +				struct exynos_drm_gem_buf *buf, +				unsigned int flags); + +/* release physical memory region, and sgt. */ +void exynos_drm_free_buf(struct drm_device *dev, +				unsigned int flags, +				struct exynos_drm_gem_buf *buffer); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c new file mode 100644 index 00000000000..9a16dbe121d --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include <drm/exynos_drm.h> +#include "exynos_drm_drv.h" +#include "exynos_drm_encoder.h" +#include "exynos_drm_connector.h" + +#define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\ +				drm_connector) + +struct exynos_drm_connector { +	struct drm_connector		drm_connector; +	uint32_t			encoder_id; +	struct exynos_drm_display	*display; +}; + +static int exynos_drm_connector_get_modes(struct drm_connector *connector) +{ +	struct exynos_drm_connector *exynos_connector = +					to_exynos_connector(connector); +	struct exynos_drm_display *display = exynos_connector->display; +	struct edid *edid = NULL; +	unsigned int count = 0; +	int ret; + +	/* +	 * if get_edid() exists then get_edid() callback of hdmi side +	 * is called to get edid data through i2c interface else +	 * get timing from the FIMD driver(display controller). +	 * +	 * P.S. in case of lcd panel, count is always 1 if success +	 * because lcd panel has only one mode. 
+	 */ +	if (display->ops->get_edid) { +		edid = display->ops->get_edid(display, connector); +		if (IS_ERR_OR_NULL(edid)) { +			ret = PTR_ERR(edid); +			edid = NULL; +			DRM_ERROR("Panel operation get_edid failed %d\n", ret); +			goto out; +		} + +		count = drm_add_edid_modes(connector, edid); +		if (!count) { +			DRM_ERROR("Add edid modes failed %d\n", count); +			goto out; +		} + +		drm_mode_connector_update_edid_property(connector, edid); +	} else { +		struct exynos_drm_panel_info *panel; +		struct drm_display_mode *mode = drm_mode_create(connector->dev); +		if (!mode) { +			DRM_ERROR("failed to create a new display mode.\n"); +			return 0; +		} + +		if (display->ops->get_panel) +			panel = display->ops->get_panel(display); +		else { +			drm_mode_destroy(connector->dev, mode); +			return 0; +		} + +		drm_display_mode_from_videomode(&panel->vm, mode); +		mode->width_mm = panel->width_mm; +		mode->height_mm = panel->height_mm; +		connector->display_info.width_mm = mode->width_mm; +		connector->display_info.height_mm = mode->height_mm; + +		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; +		drm_mode_set_name(mode); +		drm_mode_probed_add(connector, mode); + +		count = 1; +	} + +out: +	kfree(edid); +	return count; +} + +static int exynos_drm_connector_mode_valid(struct drm_connector *connector, +					    struct drm_display_mode *mode) +{ +	struct exynos_drm_connector *exynos_connector = +					to_exynos_connector(connector); +	struct exynos_drm_display *display = exynos_connector->display; +	int ret = MODE_BAD; + +	DRM_DEBUG_KMS("%s\n", __FILE__); + +	if (display->ops->check_mode) +		if (!display->ops->check_mode(display, mode)) +			ret = MODE_OK; + +	return ret; +} + +static struct drm_encoder *exynos_drm_best_encoder( +		struct drm_connector *connector) +{ +	struct drm_device *dev = connector->dev; +	struct exynos_drm_connector *exynos_connector = +					to_exynos_connector(connector); +	struct drm_mode_object *obj; +	struct drm_encoder *encoder; + +	obj 
= drm_mode_object_find(dev, exynos_connector->encoder_id, +				   DRM_MODE_OBJECT_ENCODER); +	if (!obj) { +		DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", +				exynos_connector->encoder_id); +		return NULL; +	} + +	encoder = obj_to_encoder(obj); + +	return encoder; +} + +static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { +	.get_modes	= exynos_drm_connector_get_modes, +	.mode_valid	= exynos_drm_connector_mode_valid, +	.best_encoder	= exynos_drm_best_encoder, +}; + +static int exynos_drm_connector_fill_modes(struct drm_connector *connector, +				unsigned int max_width, unsigned int max_height) +{ +	struct exynos_drm_connector *exynos_connector = +					to_exynos_connector(connector); +	struct exynos_drm_display *display = exynos_connector->display; +	unsigned int width, height; + +	width = max_width; +	height = max_height; + +	/* +	 * if specific driver want to find desired_mode using maxmum +	 * resolution then get max width and height from that driver. +	 */ +	if (display->ops->get_max_resol) +		display->ops->get_max_resol(display, &width, &height); + +	return drm_helper_probe_single_connector_modes(connector, width, +							height); +} + +/* get detection status of display device. 
*/ +static enum drm_connector_status +exynos_drm_connector_detect(struct drm_connector *connector, bool force) +{ +	struct exynos_drm_connector *exynos_connector = +					to_exynos_connector(connector); +	struct exynos_drm_display *display = exynos_connector->display; +	enum drm_connector_status status = connector_status_disconnected; + +	if (display->ops->is_connected) { +		if (display->ops->is_connected(display)) +			status = connector_status_connected; +		else +			status = connector_status_disconnected; +	} + +	return status; +} + +static void exynos_drm_connector_destroy(struct drm_connector *connector) +{ +	struct exynos_drm_connector *exynos_connector = +		to_exynos_connector(connector); + +	drm_sysfs_connector_remove(connector); +	drm_connector_cleanup(connector); +	kfree(exynos_connector); +} + +static struct drm_connector_funcs exynos_connector_funcs = { +	.dpms		= drm_helper_connector_dpms, +	.fill_modes	= exynos_drm_connector_fill_modes, +	.detect		= exynos_drm_connector_detect, +	.destroy	= exynos_drm_connector_destroy, +}; + +struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, +						   struct drm_encoder *encoder) +{ +	struct exynos_drm_connector *exynos_connector; +	struct exynos_drm_display *display = exynos_drm_get_display(encoder); +	struct drm_connector *connector; +	int type; +	int err; + +	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); +	if (!exynos_connector) +		return NULL; + +	connector = &exynos_connector->drm_connector; + +	switch (display->type) { +	case EXYNOS_DISPLAY_TYPE_HDMI: +		type = DRM_MODE_CONNECTOR_HDMIA; +		connector->interlace_allowed = true; +		connector->polled = DRM_CONNECTOR_POLL_HPD; +		break; +	case EXYNOS_DISPLAY_TYPE_VIDI: +		type = DRM_MODE_CONNECTOR_VIRTUAL; +		connector->polled = DRM_CONNECTOR_POLL_HPD; +		break; +	default: +		type = DRM_MODE_CONNECTOR_Unknown; +		break; +	} + +	drm_connector_init(dev, connector, &exynos_connector_funcs, type); +	
drm_connector_helper_add(connector, &exynos_connector_helper_funcs); + +	err = drm_sysfs_connector_add(connector); +	if (err) +		goto err_connector; + +	exynos_connector->encoder_id = encoder->base.id; +	exynos_connector->display = display; +	connector->dpms = DRM_MODE_DPMS_OFF; +	connector->encoder = encoder; + +	err = drm_mode_connector_attach_encoder(connector, encoder); +	if (err) { +		DRM_ERROR("failed to attach a connector to a encoder\n"); +		goto err_sysfs; +	} + +	DRM_DEBUG_KMS("connector has been created\n"); + +	return connector; + +err_sysfs: +	drm_sysfs_connector_remove(connector); +err_connector: +	drm_connector_cleanup(connector); +	kfree(exynos_connector); +	return NULL; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h new file mode 100644 index 00000000000..4eb20d78379 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_CONNECTOR_H_ +#define _EXYNOS_DRM_CONNECTOR_H_ + +struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, +						   struct drm_encoder *encoder); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c new file mode 100644 index 00000000000..4c9f972eaa0 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -0,0 +1,161 @@ +/* exynos_drm_core.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * Author: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_encoder.h" +#include "exynos_drm_fbdev.h" + +static LIST_HEAD(exynos_drm_subdrv_list); + +int exynos_drm_create_enc_conn(struct drm_device *dev, +					struct exynos_drm_display *display) +{ +	struct drm_encoder *encoder; +	int ret; +	unsigned long possible_crtcs = 0; + +	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type); +	if (ret < 0) +		return ret; + +	possible_crtcs |= 1 << ret; + +	/* create and initialize a encoder for this sub driver. */ +	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs); +	if (!encoder) { +		DRM_ERROR("failed to create encoder\n"); +		return -EFAULT; +	} + +	display->encoder = encoder; + +	ret = display->ops->create_connector(display, encoder); +	if (ret) { +		DRM_ERROR("failed to create connector ret = %d\n", ret); +		goto err_destroy_encoder; +	} + +	return 0; + +err_destroy_encoder: +	encoder->funcs->destroy(encoder); +	return ret; +} + +int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) +{ +	if (!subdrv) +		return -EINVAL; + +	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list); + +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); + +int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) +{ +	if (!subdrv) +		return -EINVAL; + +	list_del(&subdrv->list); + +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); + +int exynos_drm_device_subdrv_probe(struct drm_device *dev) +{ +	struct exynos_drm_subdrv *subdrv, *n; +	int err; + +	if (!dev) +		return -EINVAL; + +	
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { +		if (subdrv->probe) { +			subdrv->drm_dev = dev; + +			/* +			 * this probe callback would be called by sub driver +			 * after setting of all resources to this sub driver, +			 * such as clock, irq and register map are done. +			 */ +			err = subdrv->probe(dev, subdrv->dev); +			if (err) { +				DRM_DEBUG("exynos drm subdrv probe failed.\n"); +				list_del(&subdrv->list); +				continue; +			} +		} +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); + +int exynos_drm_device_subdrv_remove(struct drm_device *dev) +{ +	struct exynos_drm_subdrv *subdrv; + +	if (!dev) { +		WARN(1, "Unexpected drm device unregister!\n"); +		return -EINVAL; +	} + +	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { +		if (subdrv->remove) +			subdrv->remove(dev, subdrv->dev); +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); + +int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) +{ +	struct exynos_drm_subdrv *subdrv; +	int ret; + +	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { +		if (subdrv->open) { +			ret = subdrv->open(dev, subdrv->dev, file); +			if (ret) +				goto err; +		} +	} + +	return 0; + +err: +	list_for_each_entry_reverse(subdrv, &subdrv->list, list) { +		if (subdrv->close) +			subdrv->close(dev, subdrv->dev, file); +	} +	return ret; +} +EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); + +void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) +{ +	struct exynos_drm_subdrv *subdrv; + +	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { +		if (subdrv->close) +			subdrv->close(dev, subdrv->dev, file); +	} +} +EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c new file mode 100644 index 00000000000..95c9435d026 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -0,0 +1,510 @@ +/* exynos_drm_crtc.c 
+ * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "exynos_drm_crtc.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_encoder.h" +#include "exynos_drm_plane.h" + +#define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\ +				drm_crtc) + +enum exynos_crtc_mode { +	CRTC_MODE_NORMAL,	/* normal mode */ +	CRTC_MODE_BLANK,	/* The private plane of crtc is blank */ +}; + +/* + * Exynos specific crtc structure. + * + * @drm_crtc: crtc object. + * @drm_plane: pointer of private plane object for this crtc + * @manager: the manager associated with this crtc + * @pipe: a crtc index created at load() with a new crtc object creation + *	and the crtc object would be set to private->crtc array + *	to get a crtc object corresponding to this pipe from private->crtc + *	array when irq interrupt occurred. the reason of using this pipe is that + *	drm framework doesn't support multiple irq yet. + *	we can refer to the crtc to current hardware interrupt occurred through + *	this pipe value. 
+ * @dpms: store the crtc dpms value + * @mode: store the crtc mode value + */ +struct exynos_drm_crtc { +	struct drm_crtc			drm_crtc; +	struct drm_plane		*plane; +	struct exynos_drm_manager	*manager; +	unsigned int			pipe; +	unsigned int			dpms; +	enum exynos_crtc_mode		mode; +	wait_queue_head_t		pending_flip_queue; +	atomic_t			pending_flip; +}; + +static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct exynos_drm_manager *manager = exynos_crtc->manager; + +	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); + +	if (exynos_crtc->dpms == mode) { +		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); +		return; +	} + +	if (mode > DRM_MODE_DPMS_ON) { +		/* wait for the completion of page flip. */ +		wait_event(exynos_crtc->pending_flip_queue, +				atomic_read(&exynos_crtc->pending_flip) == 0); +		drm_vblank_off(crtc->dev, exynos_crtc->pipe); +	} + +	if (manager->ops->dpms) +		manager->ops->dpms(manager, mode); + +	exynos_crtc->dpms = mode; +} + +static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) +{ +	/* drm framework doesn't check NULL. 
*/ +} + +static void exynos_drm_crtc_commit(struct drm_crtc *crtc) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct exynos_drm_manager *manager = exynos_crtc->manager; + +	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + +	exynos_plane_commit(exynos_crtc->plane); + +	if (manager->ops->commit) +		manager->ops->commit(manager); + +	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); +} + +static bool +exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, +			    const struct drm_display_mode *mode, +			    struct drm_display_mode *adjusted_mode) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct exynos_drm_manager *manager = exynos_crtc->manager; + +	if (manager->ops->mode_fixup) +		return manager->ops->mode_fixup(manager, mode, adjusted_mode); + +	return true; +} + +static int +exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, +			  struct drm_display_mode *adjusted_mode, int x, int y, +			  struct drm_framebuffer *old_fb) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct exynos_drm_manager *manager = exynos_crtc->manager; +	struct drm_plane *plane = exynos_crtc->plane; +	unsigned int crtc_w; +	unsigned int crtc_h; +	int ret; + +	/* +	 * copy the mode data adjusted by mode_fixup() into crtc->mode +	 * so that hardware can be seet to proper mode. 
+	 */ +	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); + +	crtc_w = crtc->primary->fb->width - x; +	crtc_h = crtc->primary->fb->height - y; + +	if (manager->ops->mode_set) +		manager->ops->mode_set(manager, &crtc->mode); + +	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, +				    x, y, crtc_w, crtc_h); +	if (ret) +		return ret; + +	plane->crtc = crtc; +	plane->fb = crtc->primary->fb; +	drm_framebuffer_reference(plane->fb); + +	return 0; +} + +static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y, +					  struct drm_framebuffer *old_fb) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct drm_plane *plane = exynos_crtc->plane; +	unsigned int crtc_w; +	unsigned int crtc_h; +	int ret; + +	/* when framebuffer changing is requested, crtc's dpms should be on */ +	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { +		DRM_ERROR("failed framebuffer changing request.\n"); +		return -EPERM; +	} + +	crtc_w = crtc->primary->fb->width - x; +	crtc_h = crtc->primary->fb->height - y; + +	ret = exynos_plane_mode_set(plane, crtc, crtc->primary->fb, 0, 0, crtc_w, crtc_h, +				    x, y, crtc_w, crtc_h); +	if (ret) +		return ret; + +	exynos_drm_crtc_commit(crtc); + +	return 0; +} + +static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, +					  struct drm_framebuffer *old_fb) +{ +	return exynos_drm_crtc_mode_set_commit(crtc, x, y, old_fb); +} + +static void exynos_drm_crtc_disable(struct drm_crtc *crtc) +{ +	struct drm_plane *plane; +	int ret; + +	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + +	drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) { +		if (plane->crtc != crtc) +			continue; + +		ret = plane->funcs->disable_plane(plane); +		if (ret) +			DRM_ERROR("Failed to disable plane %d\n", ret); +	} +} + +static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { +	.dpms		= exynos_drm_crtc_dpms, +	.prepare	= exynos_drm_crtc_prepare, +	.commit		= 
exynos_drm_crtc_commit, +	.mode_fixup	= exynos_drm_crtc_mode_fixup, +	.mode_set	= exynos_drm_crtc_mode_set, +	.mode_set_base	= exynos_drm_crtc_mode_set_base, +	.disable	= exynos_drm_crtc_disable, +}; + +static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, +				     struct drm_framebuffer *fb, +				     struct drm_pending_vblank_event *event, +				     uint32_t page_flip_flags) +{ +	struct drm_device *dev = crtc->dev; +	struct exynos_drm_private *dev_priv = dev->dev_private; +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct drm_framebuffer *old_fb = crtc->primary->fb; +	int ret = -EINVAL; + +	/* when the page flip is requested, crtc's dpms should be on */ +	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) { +		DRM_ERROR("failed page flip request.\n"); +		return -EINVAL; +	} + +	mutex_lock(&dev->struct_mutex); + +	if (event) { +		/* +		 * the pipe from user always is 0 so we can set pipe number +		 * of current owner to event. +		 */ +		event->pipe = exynos_crtc->pipe; + +		ret = drm_vblank_get(dev, exynos_crtc->pipe); +		if (ret) { +			DRM_DEBUG("failed to acquire vblank counter\n"); + +			goto out; +		} + +		spin_lock_irq(&dev->event_lock); +		list_add_tail(&event->base.link, +				&dev_priv->pageflip_event_list); +		atomic_set(&exynos_crtc->pending_flip, 1); +		spin_unlock_irq(&dev->event_lock); + +		crtc->primary->fb = fb; +		ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y, +						    NULL); +		if (ret) { +			crtc->primary->fb = old_fb; + +			spin_lock_irq(&dev->event_lock); +			drm_vblank_put(dev, exynos_crtc->pipe); +			list_del(&event->base.link); +			spin_unlock_irq(&dev->event_lock); + +			goto out; +		} +	} +out: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) +{ +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); +	struct exynos_drm_private *private = crtc->dev->dev_private; + +	private->crtc[exynos_crtc->pipe] = NULL; + +	drm_crtc_cleanup(crtc); +	
kfree(exynos_crtc); +} + +static int exynos_drm_crtc_set_property(struct drm_crtc *crtc, +					struct drm_property *property, +					uint64_t val) +{ +	struct drm_device *dev = crtc->dev; +	struct exynos_drm_private *dev_priv = dev->dev_private; +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + +	if (property == dev_priv->crtc_mode_property) { +		enum exynos_crtc_mode mode = val; + +		if (mode == exynos_crtc->mode) +			return 0; + +		exynos_crtc->mode = mode; + +		switch (mode) { +		case CRTC_MODE_NORMAL: +			exynos_drm_crtc_commit(crtc); +			break; +		case CRTC_MODE_BLANK: +			exynos_plane_dpms(exynos_crtc->plane, +					  DRM_MODE_DPMS_OFF); +			break; +		default: +			break; +		} + +		return 0; +	} + +	return -EINVAL; +} + +static struct drm_crtc_funcs exynos_crtc_funcs = { +	.set_config	= drm_crtc_helper_set_config, +	.page_flip	= exynos_drm_crtc_page_flip, +	.destroy	= exynos_drm_crtc_destroy, +	.set_property	= exynos_drm_crtc_set_property, +}; + +static const struct drm_prop_enum_list mode_names[] = { +	{ CRTC_MODE_NORMAL, "normal" }, +	{ CRTC_MODE_BLANK, "blank" }, +}; + +static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc) +{ +	struct drm_device *dev = crtc->dev; +	struct exynos_drm_private *dev_priv = dev->dev_private; +	struct drm_property *prop; + +	prop = dev_priv->crtc_mode_property; +	if (!prop) { +		prop = drm_property_create_enum(dev, 0, "mode", mode_names, +						ARRAY_SIZE(mode_names)); +		if (!prop) +			return; + +		dev_priv->crtc_mode_property = prop; +	} + +	drm_object_attach_property(&crtc->base, prop, 0); +} + +int exynos_drm_crtc_create(struct exynos_drm_manager *manager) +{ +	struct exynos_drm_crtc *exynos_crtc; +	struct exynos_drm_private *private = manager->drm_dev->dev_private; +	struct drm_crtc *crtc; + +	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL); +	if (!exynos_crtc) +		return -ENOMEM; + +	init_waitqueue_head(&exynos_crtc->pending_flip_queue); +	atomic_set(&exynos_crtc->pending_flip, 0); + +	
exynos_crtc->dpms = DRM_MODE_DPMS_OFF; +	exynos_crtc->manager = manager; +	exynos_crtc->pipe = manager->pipe; +	exynos_crtc->plane = exynos_plane_init(manager->drm_dev, +				1 << manager->pipe, true); +	if (!exynos_crtc->plane) { +		kfree(exynos_crtc); +		return -ENOMEM; +	} + +	manager->crtc = &exynos_crtc->drm_crtc; +	crtc = &exynos_crtc->drm_crtc; + +	private->crtc[manager->pipe] = crtc; + +	drm_crtc_init(manager->drm_dev, crtc, &exynos_crtc_funcs); +	drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); + +	exynos_drm_crtc_attach_mode_property(crtc); + +	return 0; +} + +int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe) +{ +	struct exynos_drm_private *private = dev->dev_private; +	struct exynos_drm_crtc *exynos_crtc = +		to_exynos_crtc(private->crtc[pipe]); +	struct exynos_drm_manager *manager = exynos_crtc->manager; + +	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) +		return -EPERM; + +	if (manager->ops->enable_vblank) +		manager->ops->enable_vblank(manager); + +	return 0; +} + +void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe) +{ +	struct exynos_drm_private *private = dev->dev_private; +	struct exynos_drm_crtc *exynos_crtc = +		to_exynos_crtc(private->crtc[pipe]); +	struct exynos_drm_manager *manager = exynos_crtc->manager; + +	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) +		return; + +	if (manager->ops->disable_vblank) +		manager->ops->disable_vblank(manager); +} + +void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe) +{ +	struct exynos_drm_private *dev_priv = dev->dev_private; +	struct drm_pending_vblank_event *e, *t; +	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe]; +	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc); +	unsigned long flags; + +	spin_lock_irqsave(&dev->event_lock, flags); + +	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list, +			base.link) { +		/* if event's pipe isn't same as crtc then ignore it. 
*/ +		if (pipe != e->pipe) +			continue; + +		list_del(&e->base.link); +		drm_send_vblank_event(dev, -1, e); +		drm_vblank_put(dev, pipe); +		atomic_set(&exynos_crtc->pending_flip, 0); +		wake_up(&exynos_crtc->pending_flip_queue); +	} + +	spin_unlock_irqrestore(&dev->event_lock, flags); +} + +void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc, +			struct exynos_drm_overlay *overlay) +{ +	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager; + +	if (manager->ops->win_mode_set) +		manager->ops->win_mode_set(manager, overlay); +} + +void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos) +{ +	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager; + +	if (manager->ops->win_commit) +		manager->ops->win_commit(manager, zpos); +} + +void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos) +{ +	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager; + +	if (manager->ops->win_enable) +		manager->ops->win_enable(manager, zpos); +} + +void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos) +{ +	struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager; + +	if (manager->ops->win_disable) +		manager->ops->win_disable(manager, zpos); +} + +void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb) +{ +	struct exynos_drm_manager *manager; +	struct drm_device *dev = fb->dev; +	struct drm_crtc *crtc; + +	/* +	 * make sure that overlay data are updated to real hardware +	 * for all encoders. +	 */ +	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { +		manager = to_exynos_crtc(crtc)->manager; + +		/* +		 * wait for vblank interrupt +		 * - this makes sure that overlay data are updated to +		 *	real hardware. 
+		 */ +		if (manager->ops->wait_for_vblank) +			manager->ops->wait_for_vblank(manager); +	} +} + +int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, +					unsigned int out_type) +{ +	struct drm_crtc *crtc; + +	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { +		struct exynos_drm_crtc *exynos_crtc; + +		exynos_crtc = to_exynos_crtc(crtc); +		if (exynos_crtc->manager->type == out_type) +			return exynos_crtc->manager->pipe; +	} + +	return -EPERM; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h new file mode 100644 index 00000000000..9f74b10a8a0 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -0,0 +1,39 @@ +/* exynos_drm_crtc.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef _EXYNOS_DRM_CRTC_H_ +#define _EXYNOS_DRM_CRTC_H_ + +struct drm_device; +struct drm_crtc; +struct exynos_drm_manager; +struct exynos_drm_overlay; + +int exynos_drm_crtc_create(struct exynos_drm_manager *manager); +int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); +void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe); +void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe); +void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb); + +void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc, +			struct exynos_drm_overlay *overlay); +void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos); +void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos); +void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos); + +/* This function gets pipe value to crtc device matched with out_type. */ +int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, +					unsigned int out_type); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c new file mode 100644 index 00000000000..2a3ad24276f --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -0,0 +1,285 @@ +/* exynos_drm_dmabuf.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "exynos_drm_dmabuf.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" + +#include <linux/dma-buf.h> + +struct exynos_drm_dmabuf_attachment { +	struct sg_table sgt; +	enum dma_data_direction dir; +	bool is_mapped; +}; + +static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf) +{ +	return to_exynos_gem_obj(buf->priv); +} + +static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, +					struct device *dev, +					struct dma_buf_attachment *attach) +{ +	struct exynos_drm_dmabuf_attachment *exynos_attach; + +	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL); +	if (!exynos_attach) +		return -ENOMEM; + +	exynos_attach->dir = DMA_NONE; +	attach->priv = exynos_attach; + +	return 0; +} + +static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, +					struct dma_buf_attachment *attach) +{ +	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; +	struct sg_table *sgt; + +	if (!exynos_attach) +		return; + +	sgt = &exynos_attach->sgt; + +	if (exynos_attach->dir != DMA_NONE) +		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, +				exynos_attach->dir); + +	sg_free_table(sgt); +	kfree(exynos_attach); +	attach->priv = NULL; +} + +static struct sg_table * +		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, +					enum dma_data_direction dir) +{ +	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; +	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf); +	struct drm_device *dev = gem_obj->base.dev; +	struct exynos_drm_gem_buf *buf; +	struct scatterlist *rd, *wr; +	struct sg_table *sgt = NULL; +	unsigned int i; +	int nents, ret; + +	/* just return current sgt if already requested. 
*/ +	if (exynos_attach->dir == dir && exynos_attach->is_mapped) +		return &exynos_attach->sgt; + +	buf = gem_obj->buffer; +	if (!buf) { +		DRM_ERROR("buffer is null.\n"); +		return ERR_PTR(-ENOMEM); +	} + +	sgt = &exynos_attach->sgt; + +	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL); +	if (ret) { +		DRM_ERROR("failed to alloc sgt.\n"); +		return ERR_PTR(-ENOMEM); +	} + +	mutex_lock(&dev->struct_mutex); + +	rd = buf->sgt->sgl; +	wr = sgt->sgl; +	for (i = 0; i < sgt->orig_nents; ++i) { +		sg_set_page(wr, sg_page(rd), rd->length, rd->offset); +		rd = sg_next(rd); +		wr = sg_next(wr); +	} + +	if (dir != DMA_NONE) { +		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); +		if (!nents) { +			DRM_ERROR("failed to map sgl with iommu.\n"); +			sg_free_table(sgt); +			sgt = ERR_PTR(-EIO); +			goto err_unlock; +		} +	} + +	exynos_attach->is_mapped = true; +	exynos_attach->dir = dir; +	attach->priv = exynos_attach; + +	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); + +err_unlock: +	mutex_unlock(&dev->struct_mutex); +	return sgt; +} + +static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, +						struct sg_table *sgt, +						enum dma_data_direction dir) +{ +	/* Nothing to do. 
*/ +} + +static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, +						unsigned long page_num) +{ +	/* TODO */ + +	return NULL; +} + +static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, +						unsigned long page_num, +						void *addr) +{ +	/* TODO */ +} + +static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf, +					unsigned long page_num) +{ +	/* TODO */ + +	return NULL; +} + +static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf, +					unsigned long page_num, void *addr) +{ +	/* TODO */ +} + +static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, +	struct vm_area_struct *vma) +{ +	return -ENOTTY; +} + +static struct dma_buf_ops exynos_dmabuf_ops = { +	.attach			= exynos_gem_attach_dma_buf, +	.detach			= exynos_gem_detach_dma_buf, +	.map_dma_buf		= exynos_gem_map_dma_buf, +	.unmap_dma_buf		= exynos_gem_unmap_dma_buf, +	.kmap			= exynos_gem_dmabuf_kmap, +	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic, +	.kunmap			= exynos_gem_dmabuf_kunmap, +	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic, +	.mmap			= exynos_gem_dmabuf_mmap, +	.release		= drm_gem_dmabuf_release, +}; + +struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, +				struct drm_gem_object *obj, int flags) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); + +	return dma_buf_export(obj, &exynos_dmabuf_ops, +				exynos_gem_obj->base.size, flags); +} + +struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, +				struct dma_buf *dma_buf) +{ +	struct dma_buf_attachment *attach; +	struct sg_table *sgt; +	struct scatterlist *sgl; +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct exynos_drm_gem_buf *buffer; +	int ret; + +	/* is this one of own objects? */ +	if (dma_buf->ops == &exynos_dmabuf_ops) { +		struct drm_gem_object *obj; + +		obj = dma_buf->priv; + +		/* is it from our device? 
*/ +		if (obj->dev == drm_dev) { +			/* +			 * Importing dmabuf exported from out own gem increases +			 * refcount on gem itself instead of f_count of dmabuf. +			 */ +			drm_gem_object_reference(obj); +			return obj; +		} +	} + +	attach = dma_buf_attach(dma_buf, drm_dev->dev); +	if (IS_ERR(attach)) +		return ERR_PTR(-EINVAL); + +	get_dma_buf(dma_buf); + +	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); +	if (IS_ERR(sgt)) { +		ret = PTR_ERR(sgt); +		goto err_buf_detach; +	} + +	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); +	if (!buffer) { +		ret = -ENOMEM; +		goto err_unmap_attach; +	} + +	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); +	if (!exynos_gem_obj) { +		ret = -ENOMEM; +		goto err_free_buffer; +	} + +	sgl = sgt->sgl; + +	buffer->size = dma_buf->size; +	buffer->dma_addr = sg_dma_address(sgl); + +	if (sgt->nents == 1) { +		/* always physically continuous memory if sgt->nents is 1. */ +		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; +	} else { +		/* +		 * this case could be CONTIG or NONCONTIG type but for now +		 * sets NONCONTIG. +		 * TODO. we have to find a way that exporter can notify +		 * the type of its own buffer to importer. 
+		 */ +		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; +	} + +	exynos_gem_obj->buffer = buffer; +	buffer->sgt = sgt; +	exynos_gem_obj->base.import_attach = attach; + +	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, +								buffer->size); + +	return &exynos_gem_obj->base; + +err_free_buffer: +	kfree(buffer); +	buffer = NULL; +err_unmap_attach: +	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); +err_buf_detach: +	dma_buf_detach(dma_buf, attach); +	dma_buf_put(dma_buf); + +	return ERR_PTR(ret); +} + +MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); +MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h new file mode 100644 index 00000000000..49acfafb4fd --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h @@ -0,0 +1,25 @@ +/* exynos_drm_dmabuf.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_DMABUF_H_ +#define _EXYNOS_DRM_DMABUF_H_ + +#ifdef CONFIG_DRM_EXYNOS_DMABUF +struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, +				struct drm_gem_object *obj, int flags); + +struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, +						struct dma_buf *dma_buf); +#else +#define exynos_dmabuf_prime_export		NULL +#define exynos_dmabuf_prime_import		NULL +#endif +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c new file mode 100644 index 00000000000..9e530f205ad --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -0,0 +1,347 @@ +/* + * Exynos DRM Parallel output support. 
+ * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Contacts: Andrzej Hajda <a.hajda@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> + +#include <linux/regulator/consumer.h> + +#include <video/of_videomode.h> +#include <video/videomode.h> + +#include "exynos_drm_drv.h" + +struct exynos_dpi { +	struct device *dev; +	struct device_node *panel_node; + +	struct drm_panel *panel; +	struct drm_connector connector; +	struct drm_encoder *encoder; + +	struct videomode *vm; +	int dpms_mode; +}; + +#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector) + +static enum drm_connector_status +exynos_dpi_detect(struct drm_connector *connector, bool force) +{ +	struct exynos_dpi *ctx = connector_to_dpi(connector); + +	if (ctx->panel && !ctx->panel->connector) +		drm_panel_attach(ctx->panel, &ctx->connector); + +	return connector_status_connected; +} + +static void exynos_dpi_connector_destroy(struct drm_connector *connector) +{ +	drm_sysfs_connector_remove(connector); +	drm_connector_cleanup(connector); +} + +static struct drm_connector_funcs exynos_dpi_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.detect = exynos_dpi_detect, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.destroy = exynos_dpi_connector_destroy, +}; + +static int exynos_dpi_get_modes(struct drm_connector *connector) +{ +	struct exynos_dpi *ctx = connector_to_dpi(connector); + +	/* fimd timings gets precedence over panel modes */ +	if (ctx->vm) { +		struct drm_display_mode *mode; + +		mode = drm_mode_create(connector->dev); +		if (!mode) { +			DRM_ERROR("failed to create a new display mode\n"); +			return 0; +		} +		drm_display_mode_from_videomode(ctx->vm, mode); +		mode->type = DRM_MODE_TYPE_DRIVER | 
DRM_MODE_TYPE_PREFERRED; +		drm_mode_probed_add(connector, mode); +		return 1; +	} + +	if (ctx->panel) +		return ctx->panel->funcs->get_modes(ctx->panel); + +	return 0; +} + +static struct drm_encoder * +exynos_dpi_best_encoder(struct drm_connector *connector) +{ +	struct exynos_dpi *ctx = connector_to_dpi(connector); + +	return ctx->encoder; +} + +static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { +	.get_modes = exynos_dpi_get_modes, +	.best_encoder = exynos_dpi_best_encoder, +}; + +static int exynos_dpi_create_connector(struct exynos_drm_display *display, +				       struct drm_encoder *encoder) +{ +	struct exynos_dpi *ctx = display->ctx; +	struct drm_connector *connector = &ctx->connector; +	int ret; + +	ctx->encoder = encoder; + +	connector->polled = DRM_CONNECTOR_POLL_HPD; + +	ret = drm_connector_init(encoder->dev, connector, +				 &exynos_dpi_connector_funcs, +				 DRM_MODE_CONNECTOR_VGA); +	if (ret) { +		DRM_ERROR("failed to initialize connector with drm\n"); +		return ret; +	} + +	drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs); +	drm_sysfs_connector_add(connector); +	drm_mode_connector_attach_encoder(connector, encoder); + +	return 0; +} + +static void exynos_dpi_poweron(struct exynos_dpi *ctx) +{ +	if (ctx->panel) +		drm_panel_enable(ctx->panel); +} + +static void exynos_dpi_poweroff(struct exynos_dpi *ctx) +{ +	if (ctx->panel) +		drm_panel_disable(ctx->panel); +} + +static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) +{ +	struct exynos_dpi *ctx = display->ctx; + +	switch (mode) { +	case DRM_MODE_DPMS_ON: +		if (ctx->dpms_mode != DRM_MODE_DPMS_ON) +				exynos_dpi_poweron(ctx); +			break; +	case DRM_MODE_DPMS_STANDBY: +	case DRM_MODE_DPMS_SUSPEND: +	case DRM_MODE_DPMS_OFF: +		if (ctx->dpms_mode == DRM_MODE_DPMS_ON) +			exynos_dpi_poweroff(ctx); +		break; +	default: +		break; +	} +	ctx->dpms_mode = mode; +} + +static struct exynos_drm_display_ops exynos_dpi_display_ops = { +	
.create_connector = exynos_dpi_create_connector, +	.dpms = exynos_dpi_dpms +}; + +static struct exynos_drm_display exynos_dpi_display = { +	.type = EXYNOS_DISPLAY_TYPE_LCD, +	.ops = &exynos_dpi_display_ops, +}; + +/* of_* functions will be removed after merge of of_graph patches */ +static struct device_node * +of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg) +{ +	struct device_node *np; + +	for_each_child_of_node(parent, np) { +		u32 r; + +		if (!np->name || of_node_cmp(np->name, name)) +			continue; + +		if (of_property_read_u32(np, "reg", &r) < 0) +			r = 0; + +		if (reg == r) +			break; +	} + +	return np; +} + +static struct device_node *of_graph_get_port_by_reg(struct device_node *parent, +						    u32 reg) +{ +	struct device_node *ports, *port; + +	ports = of_get_child_by_name(parent, "ports"); +	if (ports) +		parent = ports; + +	port = of_get_child_by_name_reg(parent, "port", reg); + +	of_node_put(ports); + +	return port; +} + +static struct device_node * +of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg) +{ +	return of_get_child_by_name_reg(port, "endpoint", reg); +} + +static struct device_node * +of_graph_get_remote_port_parent(const struct device_node *node) +{ +	struct device_node *np; +	unsigned int depth; + +	np = of_parse_phandle(node, "remote-endpoint", 0); + +	/* Walk 3 levels up only if there is 'ports' node. 
*/ +	for (depth = 3; depth && np; depth--) { +		np = of_get_next_parent(np); +		if (depth == 2 && of_node_cmp(np->name, "ports")) +			break; +	} +	return np; +} + +enum { +	FIMD_PORT_IN0, +	FIMD_PORT_IN1, +	FIMD_PORT_IN2, +	FIMD_PORT_RGB, +	FIMD_PORT_WRB, +}; + +static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev) +{ +	struct device_node *np, *ep; + +	np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB); +	if (!np) +		return NULL; + +	ep = of_graph_get_endpoint_by_reg(np, 0); +	of_node_put(np); +	if (!ep) +		return NULL; + +	np = of_graph_get_remote_port_parent(ep); +	of_node_put(ep); + +	return np; +} + +static int exynos_dpi_parse_dt(struct exynos_dpi *ctx) +{ +	struct device *dev = ctx->dev; +	struct device_node *dn = dev->of_node; +	struct device_node *np; + +	ctx->panel_node = exynos_dpi_of_find_panel_node(dev); + +	np = of_get_child_by_name(dn, "display-timings"); +	if (np) { +		struct videomode *vm; +		int ret; + +		of_node_put(np); + +		vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL); +		if (!vm) +			return -ENOMEM; + +		ret = of_get_videomode(dn, vm, 0); +		if (ret < 0) { +			devm_kfree(dev, vm); +			return ret; +		} + +		ctx->vm = vm; + +		return 0; +	} + +	if (!ctx->panel_node) +		return -EINVAL; + +	return 0; +} + +struct exynos_drm_display *exynos_dpi_probe(struct device *dev) +{ +	struct exynos_dpi *ctx; +	int ret; + +	ret = exynos_drm_component_add(dev, +					EXYNOS_DEVICE_TYPE_CONNECTOR, +					exynos_dpi_display.type); +	if (ret) +		return ERR_PTR(ret); + +	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) +		goto err_del_component; + +	ctx->dev = dev; +	exynos_dpi_display.ctx = ctx; +	ctx->dpms_mode = DRM_MODE_DPMS_OFF; + +	ret = exynos_dpi_parse_dt(ctx); +	if (ret < 0) { +		devm_kfree(dev, ctx); +		goto err_del_component; +	} + +	if (ctx->panel_node) { +		ctx->panel = of_drm_find_panel(ctx->panel_node); +		if (!ctx->panel) { +			exynos_drm_component_del(dev, +						EXYNOS_DEVICE_TYPE_CONNECTOR); +			
return ERR_PTR(-EPROBE_DEFER); +		} +	} + +	return &exynos_dpi_display; + +err_del_component: +	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return NULL; +} + +int exynos_dpi_remove(struct device *dev) +{ +	struct drm_encoder *encoder = exynos_dpi_display.encoder; +	struct exynos_dpi *ctx = exynos_dpi_display.ctx; + +	exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); +	encoder->funcs->destroy(encoder); +	drm_connector_cleanup(&ctx->connector); + +	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return 0; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c new file mode 100644 index 00000000000..ab7d182063c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#include <linux/pm_runtime.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include <linux/anon_inodes.h> +#include <linux/component.h> + +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_encoder.h" +#include "exynos_drm_fbdev.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_plane.h" +#include "exynos_drm_vidi.h" +#include "exynos_drm_dmabuf.h" +#include "exynos_drm_g2d.h" +#include "exynos_drm_ipp.h" +#include "exynos_drm_iommu.h" + +#define DRIVER_NAME	"exynos" +#define DRIVER_DESC	"Samsung SoC DRM" +#define DRIVER_DATE	"20110530" +#define DRIVER_MAJOR	1 +#define DRIVER_MINOR	0 + +#define VBLANK_OFF_DELAY	50000 + +static struct platform_device *exynos_drm_pdev; + +static DEFINE_MUTEX(drm_component_lock); +static LIST_HEAD(drm_component_list); + +struct component_dev { +	struct list_head list; +	struct device *crtc_dev; +	struct device *conn_dev; +	enum exynos_drm_output_type out_type; +	unsigned int dev_type_flag; +}; + +static int exynos_drm_load(struct drm_device *dev, unsigned long flags) +{ +	struct exynos_drm_private *private; +	int ret; +	int nr; + +	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL); +	if (!private) +		return -ENOMEM; + +	INIT_LIST_HEAD(&private->pageflip_event_list); +	dev_set_drvdata(dev->dev, dev); +	dev->dev_private = (void *)private; + +	/* +	 * create mapping to manage iommu table and set a pointer to iommu +	 * mapping structure to iommu_mapping of private data. +	 * also this iommu_mapping can be used to check if iommu is supported +	 * or not. 
+	 */ +	ret = drm_create_iommu_mapping(dev); +	if (ret < 0) { +		DRM_ERROR("failed to create iommu mapping.\n"); +		goto err_free_private; +	} + +	drm_mode_config_init(dev); + +	exynos_drm_mode_config_init(dev); + +	for (nr = 0; nr < MAX_PLANE; nr++) { +		struct drm_plane *plane; +		unsigned long possible_crtcs = (1 << MAX_CRTC) - 1; + +		plane = exynos_plane_init(dev, possible_crtcs, false); +		if (!plane) +			goto err_mode_config_cleanup; +	} + +	/* init kms poll for handling hpd */ +	drm_kms_helper_poll_init(dev); + +	ret = drm_vblank_init(dev, MAX_CRTC); +	if (ret) +		goto err_mode_config_cleanup; + +	/* setup possible_clones. */ +	exynos_drm_encoder_setup(dev); + +	drm_vblank_offdelay = VBLANK_OFF_DELAY; + +	platform_set_drvdata(dev->platformdev, dev); + +	/* Try to bind all sub drivers. */ +	ret = component_bind_all(dev->dev, dev); +	if (ret) +		goto err_cleanup_vblank; + +	/* Probe non kms sub drivers and virtual display driver. */ +	ret = exynos_drm_device_subdrv_probe(dev); +	if (ret) +		goto err_unbind_all; + +	/* force connectors detection */ +	drm_helper_hpd_irq_event(dev); + +	return 0; + +err_unbind_all: +	component_unbind_all(dev->dev, dev); +err_cleanup_vblank: +	drm_vblank_cleanup(dev); +err_mode_config_cleanup: +	drm_mode_config_cleanup(dev); +	drm_release_iommu_mapping(dev); +err_free_private: +	kfree(private); + +	return ret; +} + +static int exynos_drm_unload(struct drm_device *dev) +{ +	exynos_drm_device_subdrv_remove(dev); + +	exynos_drm_fbdev_fini(dev); +	drm_vblank_cleanup(dev); +	drm_kms_helper_poll_fini(dev); +	drm_mode_config_cleanup(dev); + +	drm_release_iommu_mapping(dev); +	kfree(dev->dev_private); + +	component_unbind_all(dev->dev, dev); +	dev->dev_private = NULL; + +	return 0; +} + +static const struct file_operations exynos_drm_gem_fops = { +	.mmap = exynos_drm_gem_mmap_buffer, +}; + +static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) +{ +	struct drm_connector *connector; + +	drm_modeset_lock_all(dev); +	
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +		int old_dpms = connector->dpms; + +		if (connector->funcs->dpms) +			connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + +		/* Set the old mode back to the connector for resume */ +		connector->dpms = old_dpms; +	} +	drm_modeset_unlock_all(dev); + +	return 0; +} + +static int exynos_drm_resume(struct drm_device *dev) +{ +	struct drm_connector *connector; + +	drm_modeset_lock_all(dev); +	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +		if (connector->funcs->dpms) +			connector->funcs->dpms(connector, connector->dpms); +	} +	drm_modeset_unlock_all(dev); + +	drm_helper_resume_force_mode(dev); + +	return 0; +} + +static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv; +	struct file *anon_filp; +	int ret; + +	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); +	if (!file_priv) +		return -ENOMEM; + +	file->driver_priv = file_priv; + +	ret = exynos_drm_subdrv_open(dev, file); +	if (ret) +		goto err_file_priv_free; + +	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, +					NULL, 0); +	if (IS_ERR(anon_filp)) { +		ret = PTR_ERR(anon_filp); +		goto err_subdrv_close; +	} + +	anon_filp->f_mode = FMODE_READ | FMODE_WRITE; +	file_priv->anon_filp = anon_filp; + +	return ret; + +err_subdrv_close: +	exynos_drm_subdrv_close(dev, file); + +err_file_priv_free: +	kfree(file_priv); +	file->driver_priv = NULL; +	return ret; +} + +static void exynos_drm_preclose(struct drm_device *dev, +					struct drm_file *file) +{ +	exynos_drm_subdrv_close(dev, file); +} + +static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) +{ +	struct exynos_drm_private *private = dev->dev_private; +	struct drm_exynos_file_private *file_priv; +	struct drm_pending_vblank_event *v, *vt; +	struct drm_pending_event *e, *et; +	unsigned long flags; + +	if (!file->driver_priv) +		return; + +	/* Release all 
events not unhandled by page flip handler. */ +	spin_lock_irqsave(&dev->event_lock, flags); +	list_for_each_entry_safe(v, vt, &private->pageflip_event_list, +			base.link) { +		if (v->base.file_priv == file) { +			list_del(&v->base.link); +			drm_vblank_put(dev, v->pipe); +			v->base.destroy(&v->base); +		} +	} + +	/* Release all events handled by page flip handler but not freed. */ +	list_for_each_entry_safe(e, et, &file->event_list, link) { +		list_del(&e->link); +		e->destroy(e); +	} +	spin_unlock_irqrestore(&dev->event_lock, flags); + +	file_priv = file->driver_priv; +	if (file_priv->anon_filp) +		fput(file_priv->anon_filp); + +	kfree(file->driver_priv); +	file->driver_priv = NULL; +} + +static void exynos_drm_lastclose(struct drm_device *dev) +{ +	exynos_drm_fbdev_restore_mode(dev); +} + +static const struct vm_operations_struct exynos_drm_gem_vm_ops = { +	.fault = exynos_drm_gem_fault, +	.open = drm_gem_vm_open, +	.close = drm_gem_vm_close, +}; + +static const struct drm_ioctl_desc exynos_ioctls[] = { +	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl, +			DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET, +			exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED | +			DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP, +			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, +			exynos_drm_gem_get_ioctl, DRM_UNLOCKED), +	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, +			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, +			exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, +			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, +			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, +			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, +			exynos_drm_ipp_set_property, DRM_UNLOCKED | 
DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, +			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH), +	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, +			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH), +}; + +static const struct file_operations exynos_drm_driver_fops = { +	.owner		= THIS_MODULE, +	.open		= drm_open, +	.mmap		= exynos_drm_gem_mmap, +	.poll		= drm_poll, +	.read		= drm_read, +	.unlocked_ioctl	= drm_ioctl, +#ifdef CONFIG_COMPAT +	.compat_ioctl = drm_compat_ioctl, +#endif +	.release	= drm_release, +}; + +static struct drm_driver exynos_drm_driver = { +	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, +	.load			= exynos_drm_load, +	.unload			= exynos_drm_unload, +	.suspend		= exynos_drm_suspend, +	.resume			= exynos_drm_resume, +	.open			= exynos_drm_open, +	.preclose		= exynos_drm_preclose, +	.lastclose		= exynos_drm_lastclose, +	.postclose		= exynos_drm_postclose, +	.get_vblank_counter	= drm_vblank_count, +	.enable_vblank		= exynos_drm_crtc_enable_vblank, +	.disable_vblank		= exynos_drm_crtc_disable_vblank, +	.gem_free_object	= exynos_drm_gem_free_object, +	.gem_vm_ops		= &exynos_drm_gem_vm_ops, +	.dumb_create		= exynos_drm_gem_dumb_create, +	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset, +	.dumb_destroy		= drm_gem_dumb_destroy, +	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, +	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, +	.gem_prime_export	= exynos_dmabuf_prime_export, +	.gem_prime_import	= exynos_dmabuf_prime_import, +	.ioctls			= exynos_ioctls, +	.num_ioctls		= ARRAY_SIZE(exynos_ioctls), +	.fops			= &exynos_drm_driver_fops, +	.name	= DRIVER_NAME, +	.desc	= DRIVER_DESC, +	.date	= DRIVER_DATE, +	.major	= DRIVER_MAJOR, +	.minor	= DRIVER_MINOR, +}; + +#ifdef CONFIG_PM_SLEEP +static int exynos_drm_sys_suspend(struct device *dev) +{ +	struct drm_device *drm_dev = dev_get_drvdata(dev); +	pm_message_t message; + +	if (pm_runtime_suspended(dev)) +		return 0; + +	message.event = PM_EVENT_SUSPEND; +	return 
exynos_drm_suspend(drm_dev, message); +} + +static int exynos_drm_sys_resume(struct device *dev) +{ +	struct drm_device *drm_dev = dev_get_drvdata(dev); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	return exynos_drm_resume(drm_dev); +} +#endif + +static const struct dev_pm_ops exynos_drm_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume) +}; + +int exynos_drm_component_add(struct device *dev, +				enum exynos_drm_device_type dev_type, +				enum exynos_drm_output_type out_type) +{ +	struct component_dev *cdev; + +	if (dev_type != EXYNOS_DEVICE_TYPE_CRTC && +			dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) { +		DRM_ERROR("invalid device type.\n"); +		return -EINVAL; +	} + +	mutex_lock(&drm_component_lock); + +	/* +	 * Make sure to check if there is a component which has two device +	 * objects, for connector and for encoder/connector. +	 * It should make sure that crtc and encoder/connector drivers are +	 * ready before exynos drm core binds them. +	 */ +	list_for_each_entry(cdev, &drm_component_list, list) { +		if (cdev->out_type == out_type) { +			/* +			 * If crtc and encoder/connector device objects are +			 * added already just return. 
+			 */ +			if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC | +						EXYNOS_DEVICE_TYPE_CONNECTOR)) { +				mutex_unlock(&drm_component_lock); +				return 0; +			} + +			if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) { +				cdev->crtc_dev = dev; +				cdev->dev_type_flag |= dev_type; +			} + +			if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) { +				cdev->conn_dev = dev; +				cdev->dev_type_flag |= dev_type; +			} + +			mutex_unlock(&drm_component_lock); +			return 0; +		} +	} + +	mutex_unlock(&drm_component_lock); + +	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); +	if (!cdev) +		return -ENOMEM; + +	if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) +		cdev->crtc_dev = dev; +	if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) +		cdev->conn_dev = dev; + +	cdev->out_type = out_type; +	cdev->dev_type_flag = dev_type; + +	mutex_lock(&drm_component_lock); +	list_add_tail(&cdev->list, &drm_component_list); +	mutex_unlock(&drm_component_lock); + +	return 0; +} + +void exynos_drm_component_del(struct device *dev, +				enum exynos_drm_device_type dev_type) +{ +	struct component_dev *cdev, *next; + +	mutex_lock(&drm_component_lock); + +	list_for_each_entry_safe(cdev, next, &drm_component_list, list) { +		if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) { +			if (cdev->crtc_dev == dev) { +				cdev->crtc_dev = NULL; +				cdev->dev_type_flag &= ~dev_type; +			} +		} + +		if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) { +			if (cdev->conn_dev == dev) { +				cdev->conn_dev = NULL; +				cdev->dev_type_flag &= ~dev_type; +			} +		} + +		/* +		 * Release cdev object only in case that both of crtc and +		 * encoder/connector device objects are NULL. 
+		 */ +		if (!cdev->crtc_dev && !cdev->conn_dev) { +			list_del(&cdev->list); +			kfree(cdev); +		} + +		break; +	} + +	mutex_unlock(&drm_component_lock); +} + +static int compare_of(struct device *dev, void *data) +{ +	return dev == (struct device *)data; +} + +static int exynos_drm_add_components(struct device *dev, struct master *m) +{ +	struct component_dev *cdev; +	unsigned int attach_cnt = 0; + +	mutex_lock(&drm_component_lock); + +	list_for_each_entry(cdev, &drm_component_list, list) { +		int ret; + +		/* +		 * Add components to master only in case that crtc and +		 * encoder/connector device objects exist. +		 */ +		if (!cdev->crtc_dev || !cdev->conn_dev) +			continue; + +		attach_cnt++; + +		mutex_unlock(&drm_component_lock); + +		/* +		 * fimd and dpi modules have same device object so add +		 * only crtc device object in this case. +		 * +		 * TODO. if dpi module follows driver-model driver then +		 * below codes can be removed. +		 */ +		if (cdev->crtc_dev == cdev->conn_dev) { +			ret = component_master_add_child(m, compare_of, +					cdev->crtc_dev); +			if (ret < 0) +				return ret; + +			goto out_lock; +		} + +		/* +		 * Do not chage below call order. +		 * crtc device first should be added to master because +		 * connector/encoder need pipe number of crtc when they +		 * are created. +		 */ +		ret = component_master_add_child(m, compare_of, cdev->crtc_dev); +		ret |= component_master_add_child(m, compare_of, +							cdev->conn_dev); +		if (ret < 0) +			return ret; + +out_lock: +		mutex_lock(&drm_component_lock); +	} + +	mutex_unlock(&drm_component_lock); + +	return attach_cnt ? 
0 : -ENODEV; +} + +static int exynos_drm_bind(struct device *dev) +{ +	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev)); +} + +static void exynos_drm_unbind(struct device *dev) +{ +	drm_put_dev(dev_get_drvdata(dev)); +} + +static const struct component_master_ops exynos_drm_ops = { +	.add_components = exynos_drm_add_components, +	.bind		= exynos_drm_bind, +	.unbind		= exynos_drm_unbind, +}; + +static int exynos_drm_platform_probe(struct platform_device *pdev) +{ +	int ret; + +	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); +	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); + +#ifdef CONFIG_DRM_EXYNOS_FIMD +	ret = platform_driver_register(&fimd_driver); +	if (ret < 0) +		return ret; +#endif + +#ifdef CONFIG_DRM_EXYNOS_DP +	ret = platform_driver_register(&dp_driver); +	if (ret < 0) +		goto err_unregister_fimd_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_DSI +	ret = platform_driver_register(&dsi_driver); +	if (ret < 0) +		goto err_unregister_dp_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_HDMI +	ret = platform_driver_register(&mixer_driver); +	if (ret < 0) +		goto err_unregister_dsi_drv; +	ret = platform_driver_register(&hdmi_driver); +	if (ret < 0) +		goto err_unregister_mixer_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_G2D +	ret = platform_driver_register(&g2d_driver); +	if (ret < 0) +		goto err_unregister_hdmi_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_FIMC +	ret = platform_driver_register(&fimc_driver); +	if (ret < 0) +		goto err_unregister_g2d_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_ROTATOR +	ret = platform_driver_register(&rotator_driver); +	if (ret < 0) +		goto err_unregister_fimc_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_GSC +	ret = platform_driver_register(&gsc_driver); +	if (ret < 0) +		goto err_unregister_rotator_drv; +#endif + +#ifdef CONFIG_DRM_EXYNOS_IPP +	ret = platform_driver_register(&ipp_driver); +	if (ret < 0) +		goto err_unregister_gsc_drv; + +	ret = exynos_platform_device_ipp_register(); +	if (ret < 0) +		goto 
err_unregister_ipp_drv; +#endif + +	ret = component_master_add(&pdev->dev, &exynos_drm_ops); +	if (ret < 0) +		DRM_DEBUG_KMS("re-tried by last sub driver probed later.\n"); + +	return 0; + +#ifdef CONFIG_DRM_EXYNOS_IPP +err_unregister_ipp_drv: +	platform_driver_unregister(&ipp_driver); +err_unregister_gsc_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_GSC +	platform_driver_unregister(&gsc_driver); +err_unregister_rotator_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_ROTATOR +	platform_driver_unregister(&rotator_driver); +err_unregister_fimc_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_FIMC +	platform_driver_unregister(&fimc_driver); +err_unregister_g2d_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_G2D +	platform_driver_unregister(&g2d_driver); +err_unregister_hdmi_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_HDMI +	platform_driver_unregister(&hdmi_driver); +err_unregister_mixer_drv: +	platform_driver_unregister(&mixer_driver); +err_unregister_dsi_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_DSI +	platform_driver_unregister(&dsi_driver); +err_unregister_dp_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_DP +	platform_driver_unregister(&dp_driver); +err_unregister_fimd_drv: +#endif + +#ifdef CONFIG_DRM_EXYNOS_FIMD +	platform_driver_unregister(&fimd_driver); +#endif +	return ret; +} + +static int exynos_drm_platform_remove(struct platform_device *pdev) +{ +#ifdef CONFIG_DRM_EXYNOS_IPP +	exynos_platform_device_ipp_unregister(); +	platform_driver_unregister(&ipp_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_GSC +	platform_driver_unregister(&gsc_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_ROTATOR +	platform_driver_unregister(&rotator_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_FIMC +	platform_driver_unregister(&fimc_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_G2D +	platform_driver_unregister(&g2d_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_HDMI +	platform_driver_unregister(&mixer_driver); +	platform_driver_unregister(&hdmi_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_FIMD +	
platform_driver_unregister(&fimd_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_DSI +	platform_driver_unregister(&dsi_driver); +#endif + +#ifdef CONFIG_DRM_EXYNOS_DP +	platform_driver_unregister(&dp_driver); +#endif +	component_master_del(&pdev->dev, &exynos_drm_ops); +	return 0; +} + +static struct platform_driver exynos_drm_platform_driver = { +	.probe	= exynos_drm_platform_probe, +	.remove	= exynos_drm_platform_remove, +	.driver	= { +		.owner	= THIS_MODULE, +		.name	= "exynos-drm", +		.pm	= &exynos_drm_pm_ops, +	}, +}; + +static int exynos_drm_init(void) +{ +	int ret; + +	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, +								NULL, 0); +	if (IS_ERR(exynos_drm_pdev)) +		return PTR_ERR(exynos_drm_pdev); + +#ifdef CONFIG_DRM_EXYNOS_VIDI +	ret = exynos_drm_probe_vidi(); +	if (ret < 0) +		goto err_unregister_pd; +#endif + +	ret = platform_driver_register(&exynos_drm_platform_driver); +	if (ret) +		goto err_remove_vidi; + +	return 0; + +err_remove_vidi: +#ifdef CONFIG_DRM_EXYNOS_VIDI +	exynos_drm_remove_vidi(); + +err_unregister_pd: +#endif +	platform_device_unregister(exynos_drm_pdev); + +	return ret; +} + +static void exynos_drm_exit(void) +{ +	platform_driver_unregister(&exynos_drm_platform_driver); +#ifdef CONFIG_DRM_EXYNOS_VIDI +	exynos_drm_remove_vidi(); +#endif +	platform_device_unregister(exynos_drm_pdev); +} + +module_init(exynos_drm_init); +module_exit(exynos_drm_exit); + +MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); +MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); +MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>"); +MODULE_DESCRIPTION("Samsung SoC DRM Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h new file mode 100644 index 00000000000..06cde450627 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -0,0 +1,383 @@ +/* exynos_drm_drv.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_DRV_H_ +#define _EXYNOS_DRM_DRV_H_ + +#include <linux/module.h> + +#define MAX_CRTC	3 +#define MAX_PLANE	5 +#define MAX_FB_BUFFER	4 +#define DEFAULT_ZPOS	-1 + +#define _wait_for(COND, MS) ({ \ +	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\ +	int ret__ = 0;							\ +	while (!(COND)) {						\ +		if (time_after(jiffies, timeout__)) {			\ +			ret__ = -ETIMEDOUT;				\ +			break;						\ +		}							\ +	}								\ +	ret__;								\ +}) + +#define wait_for(COND, MS) _wait_for(COND, MS) + +struct drm_device; +struct exynos_drm_overlay; +struct drm_connector; + +extern unsigned int drm_vblank_offdelay; + +/* This enumerates device type. */ +enum exynos_drm_device_type { +	EXYNOS_DEVICE_TYPE_NONE, +	EXYNOS_DEVICE_TYPE_CRTC, +	EXYNOS_DEVICE_TYPE_CONNECTOR, +}; + +/* this enumerates display type. */ +enum exynos_drm_output_type { +	EXYNOS_DISPLAY_TYPE_NONE, +	/* RGB or CPU Interface. */ +	EXYNOS_DISPLAY_TYPE_LCD, +	/* HDMI Interface. */ +	EXYNOS_DISPLAY_TYPE_HDMI, +	/* Virtual Display Interface. */ +	EXYNOS_DISPLAY_TYPE_VIDI, +}; + +/* + * Exynos drm common overlay structure. + * + * @fb_x: offset x on a framebuffer to be displayed. + *	- the unit is screen coordinates. + * @fb_y: offset y on a framebuffer to be displayed. + *	- the unit is screen coordinates. + * @fb_width: width of a framebuffer. + * @fb_height: height of a framebuffer. + * @src_width: width of a partial image to be displayed from framebuffer. + * @src_height: height of a partial image to be displayed from framebuffer. + * @crtc_x: offset x on hardware screen. 
+ * @crtc_y: offset y on hardware screen. + * @crtc_width: window width to be displayed (hardware screen). + * @crtc_height: window height to be displayed (hardware screen). + * @mode_width: width of screen mode. + * @mode_height: height of screen mode. + * @refresh: refresh rate. + * @scan_flag: interlace or progressive way. + *	(it could be DRM_MODE_FLAG_*) + * @bpp: pixel size.(in bit) + * @pixel_format: fourcc pixel format of this overlay + * @dma_addr: array of bus(accessed by dma) address to the memory region + *	      allocated for a overlay. + * @zpos: order of overlay layer(z position). + * @default_win: a window to be enabled. + * @color_key: color key on or off. + * @index_color: if using color key feature then this value would be used + *			as index color. + * @local_path: in case of lcd type, local path mode on or off. + * @transparency: transparency on or off. + * @activated: activated or not. + * + * this structure is common to exynos SoC and its contents would be copied + * to hardware specific overlay info. + */ +struct exynos_drm_overlay { +	unsigned int fb_x; +	unsigned int fb_y; +	unsigned int fb_width; +	unsigned int fb_height; +	unsigned int src_width; +	unsigned int src_height; +	unsigned int crtc_x; +	unsigned int crtc_y; +	unsigned int crtc_width; +	unsigned int crtc_height; +	unsigned int mode_width; +	unsigned int mode_height; +	unsigned int refresh; +	unsigned int scan_flag; +	unsigned int bpp; +	unsigned int pitch; +	uint32_t pixel_format; +	dma_addr_t dma_addr[MAX_FB_BUFFER]; +	int zpos; + +	bool default_win; +	bool color_key; +	unsigned int index_color; +	bool local_path; +	bool transparency; +	bool activated; +}; + +/* + * Exynos DRM Display Structure. + *	- this structure is common to analog tv, digital tv and lcd panel. + * + * @remove: cleans up the display for removal + * @mode_fixup: fix mode data comparing to hw specific display mode. 
+ * @mode_set: convert drm_display_mode to hw specific display mode and + *	      would be called by encoder->mode_set(). + * @check_mode: check if mode is valid or not. + * @dpms: display device on or off. + * @commit: apply changes to hw + */ +struct exynos_drm_display; +struct exynos_drm_display_ops { +	int (*create_connector)(struct exynos_drm_display *display, +				struct drm_encoder *encoder); +	void (*remove)(struct exynos_drm_display *display); +	void (*mode_fixup)(struct exynos_drm_display *display, +				struct drm_connector *connector, +				const struct drm_display_mode *mode, +				struct drm_display_mode *adjusted_mode); +	void (*mode_set)(struct exynos_drm_display *display, +				struct drm_display_mode *mode); +	int (*check_mode)(struct exynos_drm_display *display, +				struct drm_display_mode *mode); +	void (*dpms)(struct exynos_drm_display *display, int mode); +	void (*commit)(struct exynos_drm_display *display); +}; + +/* + * Exynos drm display structure, maps 1:1 with an encoder/connector + * + * @list: the list entry for this manager + * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. + * @encoder: encoder object this display maps to + * @connector: connector object this display maps to + * @ops: pointer to callbacks for exynos drm specific functionality + * @ctx: A pointer to the display's implementation specific context + */ +struct exynos_drm_display { +	struct list_head list; +	enum exynos_drm_output_type type; +	struct drm_encoder *encoder; +	struct drm_connector *connector; +	struct exynos_drm_display_ops *ops; +	void *ctx; +}; + +/* + * Exynos drm manager ops + * + * @dpms: control device power. + * @mode_fixup: fix mode data before applying it + * @mode_set: set the given mode to the manager + * @commit: set current hw specific display mode to hw. + * @enable_vblank: specific driver callback for enabling vblank interrupt. + * @disable_vblank: specific driver callback for disabling vblank interrupt. 
+ * @wait_for_vblank: wait for vblank interrupt to make sure that + *	hardware overlay is updated. + * @win_mode_set: copy drm overlay info to hw specific overlay info. + * @win_commit: apply hardware specific overlay data to registers. + * @win_enable: enable hardware specific overlay. + * @win_disable: disable hardware specific overlay. + */ +struct exynos_drm_manager; +struct exynos_drm_manager_ops { +	void (*dpms)(struct exynos_drm_manager *mgr, int mode); +	bool (*mode_fixup)(struct exynos_drm_manager *mgr, +				const struct drm_display_mode *mode, +				struct drm_display_mode *adjusted_mode); +	void (*mode_set)(struct exynos_drm_manager *mgr, +				const struct drm_display_mode *mode); +	void (*commit)(struct exynos_drm_manager *mgr); +	int (*enable_vblank)(struct exynos_drm_manager *mgr); +	void (*disable_vblank)(struct exynos_drm_manager *mgr); +	void (*wait_for_vblank)(struct exynos_drm_manager *mgr); +	void (*win_mode_set)(struct exynos_drm_manager *mgr, +				struct exynos_drm_overlay *overlay); +	void (*win_commit)(struct exynos_drm_manager *mgr, int zpos); +	void (*win_enable)(struct exynos_drm_manager *mgr, int zpos); +	void (*win_disable)(struct exynos_drm_manager *mgr, int zpos); +}; + +/* + * Exynos drm common manager structure, maps 1:1 with a crtc + * + * @list: the list entry for this manager + * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI. + * @drm_dev: pointer to the drm device + * @crtc: crtc object. 
+ * @pipe: the pipe number for this crtc/manager + * @ops: pointer to callbacks for exynos drm specific functionality + * @ctx: A pointer to the manager's implementation specific context + */ +struct exynos_drm_manager { +	struct list_head list; +	enum exynos_drm_output_type type; +	struct drm_device *drm_dev; +	struct drm_crtc *crtc; +	int pipe; +	struct exynos_drm_manager_ops *ops; +	void *ctx; +}; + +struct exynos_drm_g2d_private { +	struct device		*dev; +	struct list_head	inuse_cmdlist; +	struct list_head	event_list; +	struct list_head	userptr_list; +}; + +struct exynos_drm_ipp_private { +	struct device	*dev; +	struct list_head	event_list; +}; + +struct drm_exynos_file_private { +	struct exynos_drm_g2d_private	*g2d_priv; +	struct exynos_drm_ipp_private	*ipp_priv; +	struct file			*anon_filp; +}; + +/* + * Exynos drm private structure. + * + * @da_start: start address to device address space. + *	with iommu, device address space starts from this address + *	otherwise default one. + * @da_space_size: size of device address space. + *	if 0 then default value is used for it. + * @pipe: the pipe number for this crtc/manager. + */ +struct exynos_drm_private { +	struct drm_fb_helper *fb_helper; + +	/* list head for new event to be added. */ +	struct list_head pageflip_event_list; + +	/* +	 * created crtc object would be contained at this array and +	 * this array is used to be aware of which crtc did it request vblank. +	 */ +	struct drm_crtc *crtc[MAX_CRTC]; +	struct drm_property *plane_zpos_property; +	struct drm_property *crtc_mode_property; + +	unsigned long da_start; +	unsigned long da_space_size; + +	unsigned int pipe; +}; + +/* + * Exynos drm sub driver structure. + * + * @list: sub driver has its own list object to register to exynos drm driver. + * @dev: pointer to device object for subdrv device driver. + * @drm_dev: pointer to drm_device and this pointer would be set + *	when sub driver calls exynos_drm_subdrv_register(). 
+ * @manager: subdrv has its own manager to control a hardware appropriately + *     and we can access a hardware drawing on this manager. + * @probe: this callback would be called by exynos drm driver after + *     subdrv is registered to it. + * @remove: this callback is used to release resources created + *     by probe callback. + * @open: this would be called with drm device file open. + * @close: this would be called with drm device file close. + */ +struct exynos_drm_subdrv { +	struct list_head list; +	struct device *dev; +	struct drm_device *drm_dev; + +	int (*probe)(struct drm_device *drm_dev, struct device *dev); +	void (*remove)(struct drm_device *drm_dev, struct device *dev); +	int (*open)(struct drm_device *drm_dev, struct device *dev, +			struct drm_file *file); +	void (*close)(struct drm_device *drm_dev, struct device *dev, +			struct drm_file *file); +}; + + /* This function would be called by non kms drivers such as g2d and ipp. */ +int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv); + +/* this function removes subdrv list from exynos drm driver */ +int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv); + +int exynos_drm_device_subdrv_probe(struct drm_device *dev); +int exynos_drm_device_subdrv_remove(struct drm_device *dev); +int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); +void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); + +/* + * this function registers exynos drm hdmi platform device. It ensures only one + * instance of the device is created. + */ +int exynos_platform_device_hdmi_register(void); + +/* + * this function unregisters exynos drm hdmi platform device if it exists. + */ +void exynos_platform_device_hdmi_unregister(void); + +/* + * this function registers exynos drm ipp platform device. + */ +int exynos_platform_device_ipp_register(void); + +/* + * this function unregisters exynos drm ipp platform device if it exists. 
+ */ +void exynos_platform_device_ipp_unregister(void); + +#ifdef CONFIG_DRM_EXYNOS_DPI +struct exynos_drm_display * exynos_dpi_probe(struct device *dev); +int exynos_dpi_remove(struct device *dev); +#else +static inline struct exynos_drm_display * +exynos_dpi_probe(struct device *dev) { return NULL; } +static inline int exynos_dpi_remove(struct device *dev) { return 0; } +#endif + +/* + * this function registers exynos drm vidi platform device/driver. + */ +int exynos_drm_probe_vidi(void); + +/* + * this function unregister exynos drm vidi platform device/driver. + */ +void exynos_drm_remove_vidi(void); + +/* This function creates a encoder and a connector, and initializes them. */ +int exynos_drm_create_enc_conn(struct drm_device *dev, +				struct exynos_drm_display *display); + +int exynos_drm_component_add(struct device *dev, +				enum exynos_drm_device_type dev_type, +				enum exynos_drm_output_type out_type); + +void exynos_drm_component_del(struct device *dev, +				enum exynos_drm_device_type dev_type); + +extern struct platform_driver fimd_driver; +extern struct platform_driver dp_driver; +extern struct platform_driver dsi_driver; +extern struct platform_driver mixer_driver; +extern struct platform_driver hdmi_driver; +extern struct platform_driver exynos_drm_common_hdmi_driver; +extern struct platform_driver vidi_driver; +extern struct platform_driver g2d_driver; +extern struct platform_driver fimc_driver; +extern struct platform_driver rotator_driver; +extern struct platform_driver gsc_driver; +extern struct platform_driver ipp_driver; +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c new file mode 100644 index 00000000000..6302aa64f6c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -0,0 +1,1546 @@ +/* + * Samsung SoC MIPI DSI Master driver. 
+ * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Contacts: Tomasz Figa <t.figa@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_panel.h> + +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/phy/phy.h> +#include <linux/regulator/consumer.h> +#include <linux/component.h> + +#include <video/mipi_display.h> +#include <video/videomode.h> + +#include "exynos_drm_drv.h" + +/* returns true iff both arguments logically differs */ +#define NEQV(a, b) (!(a) ^ !(b)) + +#define DSIM_STATUS_REG		0x0	/* Status register */ +#define DSIM_SWRST_REG		0x4	/* Software reset register */ +#define DSIM_CLKCTRL_REG	0x8	/* Clock control register */ +#define DSIM_TIMEOUT_REG	0xc	/* Time out register */ +#define DSIM_CONFIG_REG		0x10	/* Configuration register */ +#define DSIM_ESCMODE_REG	0x14	/* Escape mode register */ + +/* Main display image resolution register */ +#define DSIM_MDRESOL_REG	0x18 +#define DSIM_MVPORCH_REG	0x1c	/* Main display Vporch register */ +#define DSIM_MHPORCH_REG	0x20	/* Main display Hporch register */ +#define DSIM_MSYNC_REG		0x24	/* Main display sync area register */ + +/* Sub display image resolution register */ +#define DSIM_SDRESOL_REG	0x28 +#define DSIM_INTSRC_REG		0x2c	/* Interrupt source register */ +#define DSIM_INTMSK_REG		0x30	/* Interrupt mask register */ +#define DSIM_PKTHDR_REG		0x34	/* Packet Header FIFO register */ +#define DSIM_PAYLOAD_REG	0x38	/* Payload FIFO register */ +#define DSIM_RXFIFO_REG		0x3c	/* Read FIFO register */ +#define DSIM_FIFOTHLD_REG	0x40	/* FIFO threshold level register */ +#define DSIM_FIFOCTRL_REG	0x44	/* FIFO status and control register */ + +/* FIFO memory AC characteristic register */ +#define DSIM_PLLCTRL_REG	0x4c	/* PLL control 
register */ +#define DSIM_PLLTMR_REG		0x50	/* PLL timer register */ +#define DSIM_PHYACCHR_REG	0x54	/* D-PHY AC characteristic register */ +#define DSIM_PHYACCHR1_REG	0x58	/* D-PHY AC characteristic register1 */ + +/* DSIM_STATUS */ +#define DSIM_STOP_STATE_DAT(x)		(((x) & 0xf) << 0) +#define DSIM_STOP_STATE_CLK		(1 << 8) +#define DSIM_TX_READY_HS_CLK		(1 << 10) +#define DSIM_PLL_STABLE			(1 << 31) + +/* DSIM_SWRST */ +#define DSIM_FUNCRST			(1 << 16) +#define DSIM_SWRST			(1 << 0) + +/* DSIM_TIMEOUT */ +#define DSIM_LPDR_TIMEOUT(x)		((x) << 0) +#define DSIM_BTA_TIMEOUT(x)		((x) << 16) + +/* DSIM_CLKCTRL */ +#define DSIM_ESC_PRESCALER(x)		(((x) & 0xffff) << 0) +#define DSIM_ESC_PRESCALER_MASK		(0xffff << 0) +#define DSIM_LANE_ESC_CLK_EN_CLK	(1 << 19) +#define DSIM_LANE_ESC_CLK_EN_DATA(x)	(((x) & 0xf) << 20) +#define DSIM_LANE_ESC_CLK_EN_DATA_MASK	(0xf << 20) +#define DSIM_BYTE_CLKEN			(1 << 24) +#define DSIM_BYTE_CLK_SRC(x)		(((x) & 0x3) << 25) +#define DSIM_BYTE_CLK_SRC_MASK		(0x3 << 25) +#define DSIM_PLL_BYPASS			(1 << 27) +#define DSIM_ESC_CLKEN			(1 << 28) +#define DSIM_TX_REQUEST_HSCLK		(1 << 31) + +/* DSIM_CONFIG */ +#define DSIM_LANE_EN_CLK		(1 << 0) +#define DSIM_LANE_EN(x)			(((x) & 0xf) << 1) +#define DSIM_NUM_OF_DATA_LANE(x)	(((x) & 0x3) << 5) +#define DSIM_SUB_PIX_FORMAT(x)		(((x) & 0x7) << 8) +#define DSIM_MAIN_PIX_FORMAT_MASK	(0x7 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB888	(0x7 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB666	(0x6 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB666_P	(0x5 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB565	(0x4 << 12) +#define DSIM_SUB_VC			(((x) & 0x3) << 16) +#define DSIM_MAIN_VC			(((x) & 0x3) << 18) +#define DSIM_HSA_MODE			(1 << 20) +#define DSIM_HBP_MODE			(1 << 21) +#define DSIM_HFP_MODE			(1 << 22) +#define DSIM_HSE_MODE			(1 << 23) +#define DSIM_AUTO_MODE			(1 << 24) +#define DSIM_VIDEO_MODE			(1 << 25) +#define DSIM_BURST_MODE			(1 << 26) +#define DSIM_SYNC_INFORM		(1 << 27) +#define DSIM_EOT_DISABLE		(1 << 28) +#define 
DSIM_MFLUSH_VS			(1 << 29) + +/* DSIM_ESCMODE */ +#define DSIM_TX_TRIGGER_RST		(1 << 4) +#define DSIM_TX_LPDT_LP			(1 << 6) +#define DSIM_CMD_LPDT_LP		(1 << 7) +#define DSIM_FORCE_BTA			(1 << 16) +#define DSIM_FORCE_STOP_STATE		(1 << 20) +#define DSIM_STOP_STATE_CNT(x)		(((x) & 0x7ff) << 21) +#define DSIM_STOP_STATE_CNT_MASK	(0x7ff << 21) + +/* DSIM_MDRESOL */ +#define DSIM_MAIN_STAND_BY		(1 << 31) +#define DSIM_MAIN_VRESOL(x)		(((x) & 0x7ff) << 16) +#define DSIM_MAIN_HRESOL(x)		(((x) & 0X7ff) << 0) + +/* DSIM_MVPORCH */ +#define DSIM_CMD_ALLOW(x)		((x) << 28) +#define DSIM_STABLE_VFP(x)		((x) << 16) +#define DSIM_MAIN_VBP(x)		((x) << 0) +#define DSIM_CMD_ALLOW_MASK		(0xf << 28) +#define DSIM_STABLE_VFP_MASK		(0x7ff << 16) +#define DSIM_MAIN_VBP_MASK		(0x7ff << 0) + +/* DSIM_MHPORCH */ +#define DSIM_MAIN_HFP(x)		((x) << 16) +#define DSIM_MAIN_HBP(x)		((x) << 0) +#define DSIM_MAIN_HFP_MASK		((0xffff) << 16) +#define DSIM_MAIN_HBP_MASK		((0xffff) << 0) + +/* DSIM_MSYNC */ +#define DSIM_MAIN_VSA(x)		((x) << 22) +#define DSIM_MAIN_HSA(x)		((x) << 0) +#define DSIM_MAIN_VSA_MASK		((0x3ff) << 22) +#define DSIM_MAIN_HSA_MASK		((0xffff) << 0) + +/* DSIM_SDRESOL */ +#define DSIM_SUB_STANDY(x)		((x) << 31) +#define DSIM_SUB_VRESOL(x)		((x) << 16) +#define DSIM_SUB_HRESOL(x)		((x) << 0) +#define DSIM_SUB_STANDY_MASK		((0x1) << 31) +#define DSIM_SUB_VRESOL_MASK		((0x7ff) << 16) +#define DSIM_SUB_HRESOL_MASK		((0x7ff) << 0) + +/* DSIM_INTSRC */ +#define DSIM_INT_PLL_STABLE		(1 << 31) +#define DSIM_INT_SW_RST_RELEASE		(1 << 30) +#define DSIM_INT_SFR_FIFO_EMPTY		(1 << 29) +#define DSIM_INT_BTA			(1 << 25) +#define DSIM_INT_FRAME_DONE		(1 << 24) +#define DSIM_INT_RX_TIMEOUT		(1 << 21) +#define DSIM_INT_BTA_TIMEOUT		(1 << 20) +#define DSIM_INT_RX_DONE		(1 << 18) +#define DSIM_INT_RX_TE			(1 << 17) +#define DSIM_INT_RX_ACK			(1 << 16) +#define DSIM_INT_RX_ECC_ERR		(1 << 15) +#define DSIM_INT_RX_CRC_ERR		(1 << 14) + +/* DSIM_FIFOCTRL */ +#define DSIM_RX_DATA_FULL		(1 << 25) +#define 
DSIM_RX_DATA_EMPTY		(1 << 24) +#define DSIM_SFR_HEADER_FULL		(1 << 23) +#define DSIM_SFR_HEADER_EMPTY		(1 << 22) +#define DSIM_SFR_PAYLOAD_FULL		(1 << 21) +#define DSIM_SFR_PAYLOAD_EMPTY		(1 << 20) +#define DSIM_I80_HEADER_FULL		(1 << 19) +#define DSIM_I80_HEADER_EMPTY		(1 << 18) +#define DSIM_I80_PAYLOAD_FULL		(1 << 17) +#define DSIM_I80_PAYLOAD_EMPTY		(1 << 16) +#define DSIM_SD_HEADER_FULL		(1 << 15) +#define DSIM_SD_HEADER_EMPTY		(1 << 14) +#define DSIM_SD_PAYLOAD_FULL		(1 << 13) +#define DSIM_SD_PAYLOAD_EMPTY		(1 << 12) +#define DSIM_MD_HEADER_FULL		(1 << 11) +#define DSIM_MD_HEADER_EMPTY		(1 << 10) +#define DSIM_MD_PAYLOAD_FULL		(1 << 9) +#define DSIM_MD_PAYLOAD_EMPTY		(1 << 8) +#define DSIM_RX_FIFO			(1 << 4) +#define DSIM_SFR_FIFO			(1 << 3) +#define DSIM_I80_FIFO			(1 << 2) +#define DSIM_SD_FIFO			(1 << 1) +#define DSIM_MD_FIFO			(1 << 0) + +/* DSIM_PHYACCHR */ +#define DSIM_AFC_EN			(1 << 14) +#define DSIM_AFC_CTL(x)			(((x) & 0x7) << 5) + +/* DSIM_PLLCTRL */ +#define DSIM_FREQ_BAND(x)		((x) << 24) +#define DSIM_PLL_EN			(1 << 23) +#define DSIM_PLL_P(x)			((x) << 13) +#define DSIM_PLL_M(x)			((x) << 4) +#define DSIM_PLL_S(x)			((x) << 1) + +#define DSI_MAX_BUS_WIDTH		4 +#define DSI_NUM_VIRTUAL_CHANNELS	4 +#define DSI_TX_FIFO_SIZE		2048 +#define DSI_RX_FIFO_SIZE		256 +#define DSI_XFER_TIMEOUT_MS		100 +#define DSI_RX_FIFO_EMPTY		0x30800002 + +enum exynos_dsi_transfer_type { +	EXYNOS_DSI_TX, +	EXYNOS_DSI_RX, +}; + +struct exynos_dsi_transfer { +	struct list_head list; +	struct completion completed; +	int result; +	u8 data_id; +	u8 data[2]; +	u16 flags; + +	const u8 *tx_payload; +	u16 tx_len; +	u16 tx_done; + +	u8 *rx_payload; +	u16 rx_len; +	u16 rx_done; +}; + +#define DSIM_STATE_ENABLED		BIT(0) +#define DSIM_STATE_INITIALIZED		BIT(1) +#define DSIM_STATE_CMD_LPM		BIT(2) + +struct exynos_dsi { +	struct mipi_dsi_host dsi_host; +	struct drm_connector connector; +	struct drm_encoder *encoder; +	struct device_node *panel_node; +	struct drm_panel *panel; +	struct 
device *dev; + +	void __iomem *reg_base; +	struct phy *phy; +	struct clk *pll_clk; +	struct clk *bus_clk; +	struct regulator_bulk_data supplies[2]; +	int irq; + +	u32 pll_clk_rate; +	u32 burst_clk_rate; +	u32 esc_clk_rate; +	u32 lanes; +	u32 mode_flags; +	u32 format; +	struct videomode vm; + +	int state; +	struct drm_property *brightness; +	struct completion completed; + +	spinlock_t transfer_lock; /* protects transfer_list */ +	struct list_head transfer_list; +}; + +#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) +#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) + +static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi) +{ +	if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) +		return; + +	dev_err(dsi->dev, "timeout waiting for reset\n"); +} + +static void exynos_dsi_reset(struct exynos_dsi *dsi) +{ +	reinit_completion(&dsi->completed); +	writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG); +} + +#ifndef MHZ +#define MHZ	(1000*1000) +#endif + +static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, +		unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s) +{ +	unsigned long best_freq = 0; +	u32 min_delta = 0xffffffff; +	u8 p_min, p_max; +	u8 _p, uninitialized_var(best_p); +	u16 _m, uninitialized_var(best_m); +	u8 _s, uninitialized_var(best_s); + +	p_min = DIV_ROUND_UP(fin, (12 * MHZ)); +	p_max = fin / (6 * MHZ); + +	for (_p = p_min; _p <= p_max; ++_p) { +		for (_s = 0; _s <= 5; ++_s) { +			u64 tmp; +			u32 delta; + +			tmp = (u64)fout * (_p << _s); +			do_div(tmp, fin); +			_m = tmp; +			if (_m < 41 || _m > 125) +				continue; + +			tmp = (u64)_m * fin; +			do_div(tmp, _p); +			if (tmp < 500 * MHZ || tmp > 1000 * MHZ) +				continue; + +			tmp = (u64)_m * fin; +			do_div(tmp, _p << _s); + +			delta = abs(fout - tmp); +			if (delta < min_delta) { +				best_p = _p; +				best_m = _m; +				best_s = _s; +				min_delta = delta; +				best_freq = tmp; +			} +		} +	} + +	if 
(best_freq) { +		*p = best_p; +		*m = best_m; +		*s = best_s; +	} + +	return best_freq; +} + +static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, +					unsigned long freq) +{ +	static const unsigned long freq_bands[] = { +		100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, +		270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, +		510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ, +		770 * MHZ, 870 * MHZ, 950 * MHZ, +	}; +	unsigned long fin, fout; +	int timeout, band; +	u8 p, s; +	u16 m; +	u32 reg; + +	clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate); + +	fin = clk_get_rate(dsi->pll_clk); +	if (!fin) { +		dev_err(dsi->dev, "failed to get PLL clock frequency\n"); +		return 0; +	} + +	dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin); + +	fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s); +	if (!fout) { +		dev_err(dsi->dev, +			"failed to find PLL PMS for requested frequency\n"); +		return -EFAULT; +	} + +	for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) +		if (fout < freq_bands[band]) +			break; + +	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout, +		p, m, s, band); + +	writel(500, dsi->reg_base + DSIM_PLLTMR_REG); + +	reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN +			| DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s); +	writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); + +	timeout = 1000; +	do { +		if (timeout-- == 0) { +			dev_err(dsi->dev, "PLL failed to stabilize\n"); +			return -EFAULT; +		} +		reg = readl(dsi->reg_base + DSIM_STATUS_REG); +	} while ((reg & DSIM_PLL_STABLE) == 0); + +	return fout; +} + +static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) +{ +	unsigned long hs_clk, byte_clk, esc_clk; +	unsigned long esc_div; +	u32 reg; + +	hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate); +	if (!hs_clk) { +		dev_err(dsi->dev, "failed to configure DSI PLL\n"); +		return -EFAULT; +	} + +	byte_clk = hs_clk / 8; +	esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate); +	esc_clk = byte_clk / esc_div; + +	if (esc_clk > 20 * MHZ) { +		
++esc_div; +		esc_clk = byte_clk / esc_div; +	} + +	dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n", +		hs_clk, byte_clk, esc_clk); + +	reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG); +	reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK +			| DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS +			| DSIM_BYTE_CLK_SRC_MASK); +	reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN +			| DSIM_ESC_PRESCALER(esc_div) +			| DSIM_LANE_ESC_CLK_EN_CLK +			| DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1) +			| DSIM_BYTE_CLK_SRC(0) +			| DSIM_TX_REQUEST_HSCLK; +	writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG); + +	return 0; +} + +static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) +{ +	u32 reg; + +	reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG); +	reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK +			| DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN); +	writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG); + +	reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG); +	reg &= ~DSIM_PLL_EN; +	writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); +} + +static int exynos_dsi_init_link(struct exynos_dsi *dsi) +{ +	int timeout; +	u32 reg; +	u32 lanes_mask; + +	/* Initialize FIFO pointers */ +	reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG); +	reg &= ~0x1f; +	writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG); + +	usleep_range(9000, 11000); + +	reg |= 0x1f; +	writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG); + +	usleep_range(9000, 11000); + +	/* DSI configuration */ +	reg = 0; + +	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { +		reg |= DSIM_VIDEO_MODE; + +		if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) +			reg |= DSIM_MFLUSH_VS; +		if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) +			reg |= DSIM_EOT_DISABLE; +		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) +			reg |= DSIM_SYNC_INFORM; +		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) +			reg |= DSIM_BURST_MODE; +		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT) +			reg |= DSIM_AUTO_MODE; +		if (dsi->mode_flags & 
MIPI_DSI_MODE_VIDEO_HSE) +			reg |= DSIM_HSE_MODE; +		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP)) +			reg |= DSIM_HFP_MODE; +		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP)) +			reg |= DSIM_HBP_MODE; +		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA)) +			reg |= DSIM_HSA_MODE; +	} + +	switch (dsi->format) { +	case MIPI_DSI_FMT_RGB888: +		reg |= DSIM_MAIN_PIX_FORMAT_RGB888; +		break; +	case MIPI_DSI_FMT_RGB666: +		reg |= DSIM_MAIN_PIX_FORMAT_RGB666; +		break; +	case MIPI_DSI_FMT_RGB666_PACKED: +		reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P; +		break; +	case MIPI_DSI_FMT_RGB565: +		reg |= DSIM_MAIN_PIX_FORMAT_RGB565; +		break; +	default: +		dev_err(dsi->dev, "invalid pixel format\n"); +		return -EINVAL; +	} + +	reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1); + +	writel(reg, dsi->reg_base + DSIM_CONFIG_REG); + +	reg |= DSIM_LANE_EN_CLK; +	writel(reg, dsi->reg_base + DSIM_CONFIG_REG); + +	lanes_mask = BIT(dsi->lanes) - 1; +	reg |= DSIM_LANE_EN(lanes_mask); +	writel(reg, dsi->reg_base + DSIM_CONFIG_REG); + +	/* Check clock and data lane state are stop state */ +	timeout = 100; +	do { +		if (timeout-- == 0) { +			dev_err(dsi->dev, "waiting for bus lanes timed out\n"); +			return -EFAULT; +		} + +		reg = readl(dsi->reg_base + DSIM_STATUS_REG); +		if ((reg & DSIM_STOP_STATE_DAT(lanes_mask)) +		    != DSIM_STOP_STATE_DAT(lanes_mask)) +			continue; +	} while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK))); + +	reg = readl(dsi->reg_base + DSIM_ESCMODE_REG); +	reg &= ~DSIM_STOP_STATE_CNT_MASK; +	reg |= DSIM_STOP_STATE_CNT(0xf); +	writel(reg, dsi->reg_base + DSIM_ESCMODE_REG); + +	reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); +	writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG); + +	return 0; +} + +static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) +{ +	struct videomode *vm = &dsi->vm; +	u32 reg; + +	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { +		reg = DSIM_CMD_ALLOW(0xf) +			| DSIM_STABLE_VFP(vm->vfront_porch) +			| DSIM_MAIN_VBP(vm->vback_porch); 
+		writel(reg, dsi->reg_base + DSIM_MVPORCH_REG); + +		reg = DSIM_MAIN_HFP(vm->hfront_porch) +			| DSIM_MAIN_HBP(vm->hback_porch); +		writel(reg, dsi->reg_base + DSIM_MHPORCH_REG); + +		reg = DSIM_MAIN_VSA(vm->vsync_len) +			| DSIM_MAIN_HSA(vm->hsync_len); +		writel(reg, dsi->reg_base + DSIM_MSYNC_REG); +	} + +	reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive); +	writel(reg, dsi->reg_base + DSIM_MDRESOL_REG); + +	dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); +} + +static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) +{ +	u32 reg; + +	reg = readl(dsi->reg_base + DSIM_MDRESOL_REG); +	if (enable) +		reg |= DSIM_MAIN_STAND_BY; +	else +		reg &= ~DSIM_MAIN_STAND_BY; +	writel(reg, dsi->reg_base + DSIM_MDRESOL_REG); +} + +static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) +{ +	int timeout = 2000; + +	do { +		u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG); + +		if (!(reg & DSIM_SFR_HEADER_FULL)) +			return 0; + +		if (!cond_resched()) +			usleep_range(950, 1050); +	} while (--timeout); + +	return -ETIMEDOUT; +} + +static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm) +{ +	u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG); + +	if (lpm) +		v |= DSIM_CMD_LPDT_LP; +	else +		v &= ~DSIM_CMD_LPDT_LP; + +	writel(v, dsi->reg_base + DSIM_ESCMODE_REG); +} + +static void exynos_dsi_force_bta(struct exynos_dsi *dsi) +{ +	u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG); + +	v |= DSIM_FORCE_BTA; +	writel(v, dsi->reg_base + DSIM_ESCMODE_REG); +} + +static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, +					struct exynos_dsi_transfer *xfer) +{ +	struct device *dev = dsi->dev; +	const u8 *payload = xfer->tx_payload + xfer->tx_done; +	u16 length = xfer->tx_len - xfer->tx_done; +	bool first = !xfer->tx_done; +	u32 reg; + +	dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", +		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done); + +	if (length > DSI_TX_FIFO_SIZE) +		
length = DSI_TX_FIFO_SIZE; + +	xfer->tx_done += length; + +	/* Send payload */ +	while (length >= 4) { +		reg = (payload[3] << 24) | (payload[2] << 16) +					| (payload[1] << 8) | payload[0]; +		writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG); +		payload += 4; +		length -= 4; +	} + +	reg = 0; +	switch (length) { +	case 3: +		reg |= payload[2] << 16; +		/* Fall through */ +	case 2: +		reg |= payload[1] << 8; +		/* Fall through */ +	case 1: +		reg |= payload[0]; +		writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG); +		break; +	case 0: +		/* Do nothing */ +		break; +	} + +	/* Send packet header */ +	if (!first) +		return; + +	reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->data_id; +	if (exynos_dsi_wait_for_hdr_fifo(dsi)) { +		dev_err(dev, "waiting for header FIFO timed out\n"); +		return; +	} + +	if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM, +		 dsi->state & DSIM_STATE_CMD_LPM)) { +		exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM); +		dsi->state ^= DSIM_STATE_CMD_LPM; +	} + +	writel(reg, dsi->reg_base + DSIM_PKTHDR_REG); + +	if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) +		exynos_dsi_force_bta(dsi); +} + +static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, +					struct exynos_dsi_transfer *xfer) +{ +	u8 *payload = xfer->rx_payload + xfer->rx_done; +	bool first = !xfer->rx_done; +	struct device *dev = dsi->dev; +	u16 length; +	u32 reg; + +	if (first) { +		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); + +		switch (reg & 0x3f) { +		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: +		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: +			if (xfer->rx_len >= 2) { +				payload[1] = reg >> 16; +				++xfer->rx_done; +			} +			/* Fall through */ +		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: +		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: +			payload[0] = reg >> 8; +			++xfer->rx_done; +			xfer->rx_len = xfer->rx_done; +			xfer->result = 0; +			goto clear_fifo; +		case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: +			dev_err(dev, "DSI Error 
Report: 0x%04x\n", +				(reg >> 8) & 0xffff); +			xfer->result = 0; +			goto clear_fifo; +		} + +		length = (reg >> 8) & 0xffff; +		if (length > xfer->rx_len) { +			dev_err(dev, +				"response too long (%u > %u bytes), stripping\n", +				xfer->rx_len, length); +			length = xfer->rx_len; +		} else if (length < xfer->rx_len) +			xfer->rx_len = length; +	} + +	length = xfer->rx_len - xfer->rx_done; +	xfer->rx_done += length; + +	/* Receive payload */ +	while (length >= 4) { +		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); +		payload[0] = (reg >>  0) & 0xff; +		payload[1] = (reg >>  8) & 0xff; +		payload[2] = (reg >> 16) & 0xff; +		payload[3] = (reg >> 24) & 0xff; +		payload += 4; +		length -= 4; +	} + +	if (length) { +		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); +		switch (length) { +		case 3: +			payload[2] = (reg >> 16) & 0xff; +			/* Fall through */ +		case 2: +			payload[1] = (reg >> 8) & 0xff; +			/* Fall through */ +		case 1: +			payload[0] = reg & 0xff; +		} +	} + +	if (xfer->rx_done == xfer->rx_len) +		xfer->result = 0; + +clear_fifo: +	length = DSI_RX_FIFO_SIZE / 4; +	do { +		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); +		if (reg == DSI_RX_FIFO_EMPTY) +			break; +	} while (--length); +} + +static void exynos_dsi_transfer_start(struct exynos_dsi *dsi) +{ +	unsigned long flags; +	struct exynos_dsi_transfer *xfer; +	bool start = false; + +again: +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	if (list_empty(&dsi->transfer_list)) { +		spin_unlock_irqrestore(&dsi->transfer_lock, flags); +		return; +	} + +	xfer = list_first_entry(&dsi->transfer_list, +					struct exynos_dsi_transfer, list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); + +	if (xfer->tx_len && xfer->tx_done == xfer->tx_len) +		/* waiting for RX */ +		return; + +	exynos_dsi_send_to_fifo(dsi, xfer); + +	if (xfer->tx_len || xfer->rx_len) +		return; + +	xfer->result = 0; +	complete(&xfer->completed); + +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	list_del_init(&xfer->list); +	
start = !list_empty(&dsi->transfer_list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); + +	if (start) +		goto again; +} + +static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi) +{ +	struct exynos_dsi_transfer *xfer; +	unsigned long flags; +	bool start = true; + +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	if (list_empty(&dsi->transfer_list)) { +		spin_unlock_irqrestore(&dsi->transfer_lock, flags); +		return false; +	} + +	xfer = list_first_entry(&dsi->transfer_list, +					struct exynos_dsi_transfer, list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); + +	dev_dbg(dsi->dev, +		"> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n", +		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done); + +	if (xfer->tx_done != xfer->tx_len) +		return true; + +	if (xfer->rx_done != xfer->rx_len) +		exynos_dsi_read_from_fifo(dsi, xfer); + +	if (xfer->rx_done != xfer->rx_len) +		return true; + +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	list_del_init(&xfer->list); +	start = !list_empty(&dsi->transfer_list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); + +	if (!xfer->rx_len) +		xfer->result = 0; +	complete(&xfer->completed); + +	return start; +} + +static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi, +					struct exynos_dsi_transfer *xfer) +{ +	unsigned long flags; +	bool start; + +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	if (!list_empty(&dsi->transfer_list) && +	    xfer == list_first_entry(&dsi->transfer_list, +				     struct exynos_dsi_transfer, list)) { +		list_del_init(&xfer->list); +		start = !list_empty(&dsi->transfer_list); +		spin_unlock_irqrestore(&dsi->transfer_lock, flags); +		if (start) +			exynos_dsi_transfer_start(dsi); +		return; +	} + +	list_del_init(&xfer->list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); +} + +static int exynos_dsi_transfer(struct exynos_dsi *dsi, +					struct exynos_dsi_transfer *xfer) +{ +	unsigned long flags; +	bool stopped; + +	
xfer->tx_done = 0; +	xfer->rx_done = 0; +	xfer->result = -ETIMEDOUT; +	init_completion(&xfer->completed); + +	spin_lock_irqsave(&dsi->transfer_lock, flags); + +	stopped = list_empty(&dsi->transfer_list); +	list_add_tail(&xfer->list, &dsi->transfer_list); + +	spin_unlock_irqrestore(&dsi->transfer_lock, flags); + +	if (stopped) +		exynos_dsi_transfer_start(dsi); + +	wait_for_completion_timeout(&xfer->completed, +				    msecs_to_jiffies(DSI_XFER_TIMEOUT_MS)); +	if (xfer->result == -ETIMEDOUT) { +		exynos_dsi_remove_transfer(dsi, xfer); +		dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 2, xfer->data, +			xfer->tx_len, xfer->tx_payload); +		return -ETIMEDOUT; +	} + +	/* Also covers hardware timeout condition */ +	return xfer->result; +} + +static irqreturn_t exynos_dsi_irq(int irq, void *dev_id) +{ +	struct exynos_dsi *dsi = dev_id; +	u32 status; + +	status = readl(dsi->reg_base + DSIM_INTSRC_REG); +	if (!status) { +		static unsigned long int j; +		if (printk_timed_ratelimit(&j, 500)) +			dev_warn(dsi->dev, "spurious interrupt\n"); +		return IRQ_HANDLED; +	} +	writel(status, dsi->reg_base + DSIM_INTSRC_REG); + +	if (status & DSIM_INT_SW_RST_RELEASE) { +		u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY); +		writel(mask, dsi->reg_base + DSIM_INTMSK_REG); +		complete(&dsi->completed); +		return IRQ_HANDLED; +	} + +	if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY))) +		return IRQ_HANDLED; + +	if (exynos_dsi_transfer_finish(dsi)) +		exynos_dsi_transfer_start(dsi); + +	return IRQ_HANDLED; +} + +static int exynos_dsi_init(struct exynos_dsi *dsi) +{ +	exynos_dsi_enable_clock(dsi); +	exynos_dsi_reset(dsi); +	enable_irq(dsi->irq); +	exynos_dsi_wait_for_reset(dsi); +	exynos_dsi_init_link(dsi); + +	return 0; +} + +static int exynos_dsi_host_attach(struct mipi_dsi_host *host, +				  struct mipi_dsi_device *device) +{ +	struct exynos_dsi *dsi = host_to_dsi(host); + +	dsi->lanes = device->lanes; +	dsi->format = device->format; +	dsi->mode_flags = 
device->mode_flags; +	dsi->panel_node = device->dev.of_node; + +	if (dsi->connector.dev) +		drm_helper_hpd_irq_event(dsi->connector.dev); + +	return 0; +} + +static int exynos_dsi_host_detach(struct mipi_dsi_host *host, +				  struct mipi_dsi_device *device) +{ +	struct exynos_dsi *dsi = host_to_dsi(host); + +	dsi->panel_node = NULL; + +	if (dsi->connector.dev) +		drm_helper_hpd_irq_event(dsi->connector.dev); + +	return 0; +} + +/* distinguish between short and long DSI packet types */ +static bool exynos_dsi_is_short_dsi_type(u8 type) +{ +	return (type & 0x0f) <= 8; +} + +static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host, +				       struct mipi_dsi_msg *msg) +{ +	struct exynos_dsi *dsi = host_to_dsi(host); +	struct exynos_dsi_transfer xfer; +	int ret; + +	if (!(dsi->state & DSIM_STATE_INITIALIZED)) { +		ret = exynos_dsi_init(dsi); +		if (ret) +			return ret; +		dsi->state |= DSIM_STATE_INITIALIZED; +	} + +	if (msg->tx_len == 0) +		return -EINVAL; + +	xfer.data_id = msg->type | (msg->channel << 6); + +	if (exynos_dsi_is_short_dsi_type(msg->type)) { +		const char *tx_buf = msg->tx_buf; + +		if (msg->tx_len > 2) +			return -EINVAL; +		xfer.tx_len = 0; +		xfer.data[0] = tx_buf[0]; +		xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0; +	} else { +		xfer.tx_len = msg->tx_len; +		xfer.data[0] = msg->tx_len & 0xff; +		xfer.data[1] = msg->tx_len >> 8; +		xfer.tx_payload = msg->tx_buf; +	} + +	xfer.rx_len = msg->rx_len; +	xfer.rx_payload = msg->rx_buf; +	xfer.flags = msg->flags; + +	ret = exynos_dsi_transfer(dsi, &xfer); +	return (ret < 0) ? 
ret : xfer.rx_done; +} + +static const struct mipi_dsi_host_ops exynos_dsi_ops = { +	.attach = exynos_dsi_host_attach, +	.detach = exynos_dsi_host_detach, +	.transfer = exynos_dsi_host_transfer, +}; + +static int exynos_dsi_poweron(struct exynos_dsi *dsi) +{ +	int ret; + +	ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); +	if (ret < 0) { +		dev_err(dsi->dev, "cannot enable regulators %d\n", ret); +		return ret; +	} + +	ret = clk_prepare_enable(dsi->bus_clk); +	if (ret < 0) { +		dev_err(dsi->dev, "cannot enable bus clock %d\n", ret); +		goto err_bus_clk; +	} + +	ret = clk_prepare_enable(dsi->pll_clk); +	if (ret < 0) { +		dev_err(dsi->dev, "cannot enable pll clock %d\n", ret); +		goto err_pll_clk; +	} + +	ret = phy_power_on(dsi->phy); +	if (ret < 0) { +		dev_err(dsi->dev, "cannot enable phy %d\n", ret); +		goto err_phy; +	} + +	return 0; + +err_phy: +	clk_disable_unprepare(dsi->pll_clk); +err_pll_clk: +	clk_disable_unprepare(dsi->bus_clk); +err_bus_clk: +	regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + +	return ret; +} + +static void exynos_dsi_poweroff(struct exynos_dsi *dsi) +{ +	int ret; + +	usleep_range(10000, 20000); + +	if (dsi->state & DSIM_STATE_INITIALIZED) { +		dsi->state &= ~DSIM_STATE_INITIALIZED; + +		exynos_dsi_disable_clock(dsi); + +		disable_irq(dsi->irq); +	} + +	dsi->state &= ~DSIM_STATE_CMD_LPM; + +	phy_power_off(dsi->phy); + +	clk_disable_unprepare(dsi->pll_clk); +	clk_disable_unprepare(dsi->bus_clk); + +	ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); +	if (ret < 0) +		dev_err(dsi->dev, "cannot disable regulators %d\n", ret); +} + +static int exynos_dsi_enable(struct exynos_dsi *dsi) +{ +	int ret; + +	if (dsi->state & DSIM_STATE_ENABLED) +		return 0; + +	ret = exynos_dsi_poweron(dsi); +	if (ret < 0) +		return ret; + +	ret = drm_panel_enable(dsi->panel); +	if (ret < 0) { +		exynos_dsi_poweroff(dsi); +		return ret; +	} + +	exynos_dsi_set_display_mode(dsi); +	
exynos_dsi_set_display_enable(dsi, true); + +	dsi->state |= DSIM_STATE_ENABLED; + +	return 0; +} + +static void exynos_dsi_disable(struct exynos_dsi *dsi) +{ +	if (!(dsi->state & DSIM_STATE_ENABLED)) +		return; + +	exynos_dsi_set_display_enable(dsi, false); +	drm_panel_disable(dsi->panel); +	exynos_dsi_poweroff(dsi); + +	dsi->state &= ~DSIM_STATE_ENABLED; +} + +static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode) +{ +	struct exynos_dsi *dsi = display->ctx; + +	if (dsi->panel) { +		switch (mode) { +		case DRM_MODE_DPMS_ON: +			exynos_dsi_enable(dsi); +			break; +		case DRM_MODE_DPMS_STANDBY: +		case DRM_MODE_DPMS_SUSPEND: +		case DRM_MODE_DPMS_OFF: +			exynos_dsi_disable(dsi); +			break; +		default: +			break; +		} +	} +} + +static enum drm_connector_status +exynos_dsi_detect(struct drm_connector *connector, bool force) +{ +	struct exynos_dsi *dsi = connector_to_dsi(connector); + +	if (!dsi->panel) { +		dsi->panel = of_drm_find_panel(dsi->panel_node); +		if (dsi->panel) +			drm_panel_attach(dsi->panel, &dsi->connector); +	} else if (!dsi->panel_node) { +		struct exynos_drm_display *display; + +		display = platform_get_drvdata(to_platform_device(dsi->dev)); +		exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF); +		drm_panel_detach(dsi->panel); +		dsi->panel = NULL; +	} + +	if (dsi->panel) +		return connector_status_connected; + +	return connector_status_disconnected; +} + +static void exynos_dsi_connector_destroy(struct drm_connector *connector) +{ +} + +static struct drm_connector_funcs exynos_dsi_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.detect = exynos_dsi_detect, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.destroy = exynos_dsi_connector_destroy, +}; + +static int exynos_dsi_get_modes(struct drm_connector *connector) +{ +	struct exynos_dsi *dsi = connector_to_dsi(connector); + +	if (dsi->panel) +		return dsi->panel->funcs->get_modes(dsi->panel); + +	return 0; +} + +static int exynos_dsi_mode_valid(struct 
drm_connector *connector, +				 struct drm_display_mode *mode) +{ +	return MODE_OK; +} + +static struct drm_encoder * +exynos_dsi_best_encoder(struct drm_connector *connector) +{ +	struct exynos_dsi *dsi = connector_to_dsi(connector); + +	return dsi->encoder; +} + +static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { +	.get_modes = exynos_dsi_get_modes, +	.mode_valid = exynos_dsi_mode_valid, +	.best_encoder = exynos_dsi_best_encoder, +}; + +static int exynos_dsi_create_connector(struct exynos_drm_display *display, +				       struct drm_encoder *encoder) +{ +	struct exynos_dsi *dsi = display->ctx; +	struct drm_connector *connector = &dsi->connector; +	int ret; + +	dsi->encoder = encoder; + +	connector->polled = DRM_CONNECTOR_POLL_HPD; + +	ret = drm_connector_init(encoder->dev, connector, +				 &exynos_dsi_connector_funcs, +				 DRM_MODE_CONNECTOR_DSI); +	if (ret) { +		DRM_ERROR("Failed to initialize connector with drm\n"); +		return ret; +	} + +	drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); +	drm_sysfs_connector_add(connector); +	drm_mode_connector_attach_encoder(connector, encoder); + +	return 0; +} + +static void exynos_dsi_mode_set(struct exynos_drm_display *display, +			 struct drm_display_mode *mode) +{ +	struct exynos_dsi *dsi = display->ctx; +	struct videomode *vm = &dsi->vm; + +	vm->hactive = mode->hdisplay; +	vm->vactive = mode->vdisplay; +	vm->vfront_porch = mode->vsync_start - mode->vdisplay; +	vm->vback_porch = mode->vtotal - mode->vsync_end; +	vm->vsync_len = mode->vsync_end - mode->vsync_start; +	vm->hfront_porch = mode->hsync_start - mode->hdisplay; +	vm->hback_porch = mode->htotal - mode->hsync_end; +	vm->hsync_len = mode->hsync_end - mode->hsync_start; +} + +static struct exynos_drm_display_ops exynos_dsi_display_ops = { +	.create_connector = exynos_dsi_create_connector, +	.mode_set = exynos_dsi_mode_set, +	.dpms = exynos_dsi_dpms +}; + +static struct exynos_drm_display exynos_dsi_display = { +	
.type = EXYNOS_DISPLAY_TYPE_LCD, +	.ops = &exynos_dsi_display_ops, +}; + +/* of_* functions will be removed after merge of of_graph patches */ +static struct device_node * +of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg) +{ +	struct device_node *np; + +	for_each_child_of_node(parent, np) { +		u32 r; + +		if (!np->name || of_node_cmp(np->name, name)) +			continue; + +		if (of_property_read_u32(np, "reg", &r) < 0) +			r = 0; + +		if (reg == r) +			break; +	} + +	return np; +} + +static struct device_node *of_graph_get_port_by_reg(struct device_node *parent, +						    u32 reg) +{ +	struct device_node *ports, *port; + +	ports = of_get_child_by_name(parent, "ports"); +	if (ports) +		parent = ports; + +	port = of_get_child_by_name_reg(parent, "port", reg); + +	of_node_put(ports); + +	return port; +} + +static struct device_node * +of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg) +{ +	return of_get_child_by_name_reg(port, "endpoint", reg); +} + +static int exynos_dsi_of_read_u32(const struct device_node *np, +				  const char *propname, u32 *out_value) +{ +	int ret = of_property_read_u32(np, propname, out_value); + +	if (ret < 0) +		pr_err("%s: failed to get '%s' property\n", np->full_name, +		       propname); + +	return ret; +} + +enum { +	DSI_PORT_IN, +	DSI_PORT_OUT +}; + +static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) +{ +	struct device *dev = dsi->dev; +	struct device_node *node = dev->of_node; +	struct device_node *port, *ep; +	int ret; + +	ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", +				     &dsi->pll_clk_rate); +	if (ret < 0) +		return ret; + +	port = of_graph_get_port_by_reg(node, DSI_PORT_OUT); +	if (!port) { +		dev_err(dev, "no output port specified\n"); +		return -EINVAL; +	} + +	ep = of_graph_get_endpoint_by_reg(port, 0); +	of_node_put(port); +	if (!ep) { +		dev_err(dev, "no endpoint specified in output port\n"); +		return -EINVAL; +	} + +	ret = exynos_dsi_of_read_u32(ep, 
"samsung,burst-clock-frequency", +				     &dsi->burst_clk_rate); +	if (ret < 0) +		goto end; + +	ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", +				     &dsi->esc_clk_rate); + +end: +	of_node_put(ep); + +	return ret; +} + +static int exynos_dsi_bind(struct device *dev, struct device *master, +				void *data) +{ +	struct drm_device *drm_dev = data; +	struct exynos_dsi *dsi; +	int ret; + +	ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display); +	if (ret) { +		DRM_ERROR("Encoder create [%d] failed with %d\n", +				exynos_dsi_display.type, ret); +		return ret; +	} + +	dsi = exynos_dsi_display.ctx; + +	return mipi_dsi_host_register(&dsi->dsi_host); +} + +static void exynos_dsi_unbind(struct device *dev, struct device *master, +				void *data) +{ +	struct exynos_dsi *dsi = exynos_dsi_display.ctx; +	struct drm_encoder *encoder = dsi->encoder; + +	exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); + +	mipi_dsi_host_unregister(&dsi->dsi_host); + +	encoder->funcs->destroy(encoder); +	drm_connector_cleanup(&dsi->connector); +} + +static const struct component_ops exynos_dsi_component_ops = { +	.bind	= exynos_dsi_bind, +	.unbind	= exynos_dsi_unbind, +}; + +static int exynos_dsi_probe(struct platform_device *pdev) +{ +	struct resource *res; +	struct exynos_dsi *dsi; +	int ret; + +	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, +					exynos_dsi_display.type); +	if (ret) +		return ret; + +	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); +	if (!dsi) { +		dev_err(&pdev->dev, "failed to allocate dsi object.\n"); +		ret = -ENOMEM; +		goto err_del_component; +	} + +	init_completion(&dsi->completed); +	spin_lock_init(&dsi->transfer_lock); +	INIT_LIST_HEAD(&dsi->transfer_list); + +	dsi->dsi_host.ops = &exynos_dsi_ops; +	dsi->dsi_host.dev = &pdev->dev; + +	dsi->dev = &pdev->dev; + +	ret = exynos_dsi_parse_dt(dsi); +	if (ret) +		goto err_del_component; + +	dsi->supplies[0].supply = "vddcore"; +	dsi->supplies[1].supply = 
"vddio"; +	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies), +				      dsi->supplies); +	if (ret) { +		dev_info(&pdev->dev, "failed to get regulators: %d\n", ret); +		return -EPROBE_DEFER; +	} + +	dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk"); +	if (IS_ERR(dsi->pll_clk)) { +		dev_info(&pdev->dev, "failed to get dsi pll input clock\n"); +		ret = PTR_ERR(dsi->pll_clk); +		goto err_del_component; +	} + +	dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); +	if (IS_ERR(dsi->bus_clk)) { +		dev_info(&pdev->dev, "failed to get dsi bus clock\n"); +		ret = PTR_ERR(dsi->bus_clk); +		goto err_del_component; +	} + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(dsi->reg_base)) { +		dev_err(&pdev->dev, "failed to remap io region\n"); +		ret = PTR_ERR(dsi->reg_base); +		goto err_del_component; +	} + +	dsi->phy = devm_phy_get(&pdev->dev, "dsim"); +	if (IS_ERR(dsi->phy)) { +		dev_info(&pdev->dev, "failed to get dsim phy\n"); +		ret = PTR_ERR(dsi->phy); +		goto err_del_component; +	} + +	dsi->irq = platform_get_irq(pdev, 0); +	if (dsi->irq < 0) { +		dev_err(&pdev->dev, "failed to request dsi irq resource\n"); +		ret = dsi->irq; +		goto err_del_component; +	} + +	irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN); +	ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL, +					exynos_dsi_irq, IRQF_ONESHOT, +					dev_name(&pdev->dev), dsi); +	if (ret) { +		dev_err(&pdev->dev, "failed to request dsi irq\n"); +		goto err_del_component; +	} + +	exynos_dsi_display.ctx = dsi; + +	platform_set_drvdata(pdev, &exynos_dsi_display); + +	ret = component_add(&pdev->dev, &exynos_dsi_component_ops); +	if (ret) +		goto err_del_component; + +	return ret; + +err_del_component: +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); +	return ret; +} + +static int exynos_dsi_remove(struct platform_device *pdev) +{ +	component_del(&pdev->dev, &exynos_dsi_component_ops); +	
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return 0; +} + +static struct of_device_id exynos_dsi_of_match[] = { +	{ .compatible = "samsung,exynos4210-mipi-dsi" }, +	{ } +}; + +struct platform_driver dsi_driver = { +	.probe = exynos_dsi_probe, +	.remove = exynos_dsi_remove, +	.driver = { +		   .name = "exynos-dsi", +		   .owner = THIS_MODULE, +		   .of_match_table = exynos_dsi_of_match, +	}, +}; + +MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>"); +MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>"); +MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c new file mode 100644 index 00000000000..7e282e3d603 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -0,0 +1,197 @@ +/* exynos_drm_encoder.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_encoder.h" + +#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\ +				drm_encoder) + +/* + * exynos specific encoder structure. + * + * @drm_encoder: encoder object. 
+ * @display: the display structure that maps to this encoder + */ +struct exynos_drm_encoder { +	struct drm_encoder		drm_encoder; +	struct exynos_drm_display	*display; +}; + +static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); +	struct exynos_drm_display *display = exynos_encoder->display; + +	DRM_DEBUG_KMS("encoder dpms: %d\n", mode); + +	if (display->ops->dpms) +		display->ops->dpms(display, mode); +} + +static bool +exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, +			       const struct drm_display_mode *mode, +			       struct drm_display_mode *adjusted_mode) +{ +	struct drm_device *dev = encoder->dev; +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); +	struct exynos_drm_display *display = exynos_encoder->display; +	struct drm_connector *connector; + +	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +		if (connector->encoder != encoder) +			continue; + +		if (display->ops->mode_fixup) +			display->ops->mode_fixup(display, connector, mode, +					adjusted_mode); +	} + +	return true; +} + +static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, +					 struct drm_display_mode *mode, +					 struct drm_display_mode *adjusted_mode) +{ +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); +	struct exynos_drm_display *display = exynos_encoder->display; + +	if (display->ops->mode_set) +		display->ops->mode_set(display, adjusted_mode); +} + +static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) +{ +	/* drm framework doesn't check NULL. 
*/ +} + +static void exynos_drm_encoder_commit(struct drm_encoder *encoder) +{ +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); +	struct exynos_drm_display *display = exynos_encoder->display; + +	if (display->ops->dpms) +		display->ops->dpms(display, DRM_MODE_DPMS_ON); + +	if (display->ops->commit) +		display->ops->commit(display); +} + +static void exynos_drm_encoder_disable(struct drm_encoder *encoder) +{ +	struct drm_plane *plane; +	struct drm_device *dev = encoder->dev; + +	exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + +	/* all planes connected to this encoder should be also disabled. */ +	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { +		if (plane->crtc == encoder->crtc) +			plane->funcs->disable_plane(plane); +	} +} + +static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { +	.dpms		= exynos_drm_encoder_dpms, +	.mode_fixup	= exynos_drm_encoder_mode_fixup, +	.mode_set	= exynos_drm_encoder_mode_set, +	.prepare	= exynos_drm_encoder_prepare, +	.commit		= exynos_drm_encoder_commit, +	.disable	= exynos_drm_encoder_disable, +}; + +static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) +{ +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); + +	drm_encoder_cleanup(encoder); +	kfree(exynos_encoder); +} + +static struct drm_encoder_funcs exynos_encoder_funcs = { +	.destroy = exynos_drm_encoder_destroy, +}; + +static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder) +{ +	struct drm_encoder *clone; +	struct drm_device *dev = encoder->dev; +	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); +	struct exynos_drm_display *display = exynos_encoder->display; +	unsigned int clone_mask = 0; +	int cnt = 0; + +	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) { +		switch (display->type) { +		case EXYNOS_DISPLAY_TYPE_LCD: +		case EXYNOS_DISPLAY_TYPE_HDMI: +		case EXYNOS_DISPLAY_TYPE_VIDI: +			clone_mask |= (1 << (cnt++)); +			
break; +		default: +			continue; +		} +	} + +	return clone_mask; +} + +void exynos_drm_encoder_setup(struct drm_device *dev) +{ +	struct drm_encoder *encoder; + +	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) +		encoder->possible_clones = exynos_drm_encoder_clones(encoder); +} + +struct drm_encoder * +exynos_drm_encoder_create(struct drm_device *dev, +			   struct exynos_drm_display *display, +			   unsigned long possible_crtcs) +{ +	struct drm_encoder *encoder; +	struct exynos_drm_encoder *exynos_encoder; + +	if (!possible_crtcs) +		return NULL; + +	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL); +	if (!exynos_encoder) +		return NULL; + +	exynos_encoder->display = display; +	encoder = &exynos_encoder->drm_encoder; +	encoder->possible_crtcs = possible_crtcs; + +	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + +	drm_encoder_init(dev, encoder, &exynos_encoder_funcs, +			DRM_MODE_ENCODER_TMDS); + +	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs); + +	DRM_DEBUG_KMS("encoder has been created\n"); + +	return encoder; +} + +struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder) +{ +	return to_exynos_encoder(encoder)->display; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h new file mode 100644 index 00000000000..b7a1620a7e7 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef _EXYNOS_DRM_ENCODER_H_ +#define _EXYNOS_DRM_ENCODER_H_ + +struct exynos_drm_manager; + +void exynos_drm_encoder_setup(struct drm_device *dev); +struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev, +			struct exynos_drm_display *mgr, +			unsigned long possible_crtcs); +struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c new file mode 100644 index 00000000000..65a22cad7b3 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -0,0 +1,327 @@ +/* exynos_drm_fb.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <uapi/drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_fbdev.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_iommu.h" +#include "exynos_drm_crtc.h" + +#define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb) + +/* + * exynos specific framebuffer structure. + * + * @fb: drm framebuffer obejct. + * @buf_cnt: a buffer count to drm framebuffer. + * @exynos_gem_obj: array of exynos specific gem object containing a gem object. 
+ */ +struct exynos_drm_fb { +	struct drm_framebuffer		fb; +	unsigned int			buf_cnt; +	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER]; +}; + +static int check_fb_gem_memory_type(struct drm_device *drm_dev, +				struct exynos_drm_gem_obj *exynos_gem_obj) +{ +	unsigned int flags; + +	/* +	 * if exynos drm driver supports iommu then framebuffer can use +	 * all the buffer types. +	 */ +	if (is_drm_iommu_supported(drm_dev)) +		return 0; + +	flags = exynos_gem_obj->flags; + +	/* +	 * without iommu support, not support physically non-continuous memory +	 * for framebuffer. +	 */ +	if (IS_NONCONTIG_BUFFER(flags)) { +		DRM_ERROR("cannot use this gem memory type for fb.\n"); +		return -EINVAL; +	} + +	return 0; +} + +static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) +{ +	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); +	unsigned int i; + +	/* make sure that overlay data are updated before relesing fb. */ +	exynos_drm_crtc_complete_scanout(fb); + +	drm_framebuffer_cleanup(fb); + +	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { +		struct drm_gem_object *obj; + +		if (exynos_fb->exynos_gem_obj[i] == NULL) +			continue; + +		obj = &exynos_fb->exynos_gem_obj[i]->base; +		drm_gem_object_unreference_unlocked(obj); +	} + +	kfree(exynos_fb); +	exynos_fb = NULL; +} + +static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, +					struct drm_file *file_priv, +					unsigned int *handle) +{ +	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + +	/* This fb should have only one gem object. 
*/ +	if (WARN_ON(exynos_fb->buf_cnt != 1)) +		return -EINVAL; + +	return drm_gem_handle_create(file_priv, +			&exynos_fb->exynos_gem_obj[0]->base, handle); +} + +static int exynos_drm_fb_dirty(struct drm_framebuffer *fb, +				struct drm_file *file_priv, unsigned flags, +				unsigned color, struct drm_clip_rect *clips, +				unsigned num_clips) +{ +	/* TODO */ + +	return 0; +} + +static struct drm_framebuffer_funcs exynos_drm_fb_funcs = { +	.destroy	= exynos_drm_fb_destroy, +	.create_handle	= exynos_drm_fb_create_handle, +	.dirty		= exynos_drm_fb_dirty, +}; + +void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb, +						unsigned int cnt) +{ +	struct exynos_drm_fb *exynos_fb; + +	exynos_fb = to_exynos_fb(fb); + +	exynos_fb->buf_cnt = cnt; +} + +unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb) +{ +	struct exynos_drm_fb *exynos_fb; + +	exynos_fb = to_exynos_fb(fb); + +	return exynos_fb->buf_cnt; +} + +struct drm_framebuffer * +exynos_drm_framebuffer_init(struct drm_device *dev, +			    struct drm_mode_fb_cmd2 *mode_cmd, +			    struct drm_gem_object *obj) +{ +	struct exynos_drm_fb *exynos_fb; +	struct exynos_drm_gem_obj *exynos_gem_obj; +	int ret; + +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	ret = check_fb_gem_memory_type(dev, exynos_gem_obj); +	if (ret < 0) { +		DRM_ERROR("cannot use this gem memory type for fb.\n"); +		return ERR_PTR(-EINVAL); +	} + +	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); +	if (!exynos_fb) +		return ERR_PTR(-ENOMEM); + +	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); +	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj; + +	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); +	if (ret) { +		DRM_ERROR("failed to initialize framebuffer\n"); +		return ERR_PTR(ret); +	} + +	return &exynos_fb->fb; +} + +static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd) +{ +	unsigned int cnt = 0; + +	if (mode_cmd->pixel_format != DRM_FORMAT_NV12) +		return 
drm_format_num_planes(mode_cmd->pixel_format); + +	while (cnt != MAX_FB_BUFFER) { +		if (!mode_cmd->handles[cnt]) +			break; +		cnt++; +	} + +	/* +	 * check if NV12 or NV12M. +	 * +	 * NV12 +	 * handles[0] = base1, offsets[0] = 0 +	 * handles[1] = base1, offsets[1] = Y_size +	 * +	 * NV12M +	 * handles[0] = base1, offsets[0] = 0 +	 * handles[1] = base2, offsets[1] = 0 +	 */ +	if (cnt == 2) { +		/* +		 * in case of NV12 format, offsets[1] is not 0 and +		 * handles[0] is same as handles[1]. +		 */ +		if (mode_cmd->offsets[1] && +			mode_cmd->handles[0] == mode_cmd->handles[1]) +			cnt = 1; +	} + +	return cnt; +} + +static struct drm_framebuffer * +exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, +		      struct drm_mode_fb_cmd2 *mode_cmd) +{ +	struct drm_gem_object *obj; +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct exynos_drm_fb *exynos_fb; +	int i, ret; + +	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); +	if (!exynos_fb) +		return ERR_PTR(-ENOMEM); + +	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object\n"); +		ret = -ENOENT; +		goto err_free; +	} + +	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); +	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); +	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); + +	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); + +	for (i = 1; i < exynos_fb->buf_cnt; i++) { +		obj = drm_gem_object_lookup(dev, file_priv, +				mode_cmd->handles[i]); +		if (!obj) { +			DRM_ERROR("failed to lookup gem object\n"); +			ret = -ENOENT; +			exynos_fb->buf_cnt = i; +			goto err_unreference; +		} + +		exynos_gem_obj = to_exynos_gem_obj(obj); +		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj; + +		ret = check_fb_gem_memory_type(dev, exynos_gem_obj); +		if (ret < 0) { +			DRM_ERROR("cannot use this gem memory type for fb.\n"); +			goto err_unreference; +		} +	} + +	ret = drm_framebuffer_init(dev, &exynos_fb->fb, 
&exynos_drm_fb_funcs); +	if (ret) { +		DRM_ERROR("failed to init framebuffer.\n"); +		goto err_unreference; +	} + +	return &exynos_fb->fb; + +err_unreference: +	for (i = 0; i < exynos_fb->buf_cnt; i++) { +		struct drm_gem_object *obj; + +		obj = &exynos_fb->exynos_gem_obj[i]->base; +		if (obj) +			drm_gem_object_unreference_unlocked(obj); +	} +err_free: +	kfree(exynos_fb); +	return ERR_PTR(ret); +} + +struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, +						int index) +{ +	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); +	struct exynos_drm_gem_buf *buffer; + +	if (index >= MAX_FB_BUFFER) +		return NULL; + +	buffer = exynos_fb->exynos_gem_obj[index]->buffer; +	if (!buffer) +		return NULL; + +	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr); + +	return buffer; +} + +static void exynos_drm_output_poll_changed(struct drm_device *dev) +{ +	struct exynos_drm_private *private = dev->dev_private; +	struct drm_fb_helper *fb_helper = private->fb_helper; + +	if (fb_helper) +		drm_fb_helper_hotplug_event(fb_helper); +	else +		exynos_drm_fbdev_init(dev); +} + +static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { +	.fb_create = exynos_user_fb_create, +	.output_poll_changed = exynos_drm_output_poll_changed, +}; + +void exynos_drm_mode_config_init(struct drm_device *dev) +{ +	dev->mode_config.min_width = 0; +	dev->mode_config.min_height = 0; + +	/* +	 * set max width and height as default value(4096x4096). +	 * this value would be used to check framebuffer size limitation +	 * at drm_mode_addfb(). 
+	 */ +	dev->mode_config.max_width = 4096; +	dev->mode_config.max_height = 4096; + +	dev->mode_config.funcs = &exynos_drm_mode_config_funcs; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h new file mode 100644 index 00000000000..517471b3756 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_FB_H_ +#define _EXYNOS_DRM_FB_H + +struct drm_framebuffer * +exynos_drm_framebuffer_init(struct drm_device *dev, +			    struct drm_mode_fb_cmd2 *mode_cmd, +			    struct drm_gem_object *obj); + +/* get memory information of a drm framebuffer */ +struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, +						 int index); + +void exynos_drm_mode_config_init(struct drm_device *dev); + +/* set a buffer count to drm framebuffer. */ +void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb, +						unsigned int cnt); + +/* get a buffer count to drm framebuffer. */ +unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c new file mode 100644 index 00000000000..d771b467cf0 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -0,0 +1,371 @@ +/* exynos_drm_fbdev.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_fbdev.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_iommu.h" + +#define MAX_CONNECTOR		4 +#define PREFERRED_BPP		32 + +#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\ +				drm_fb_helper) + +struct exynos_drm_fbdev { +	struct drm_fb_helper		drm_fb_helper; +	struct exynos_drm_gem_obj	*exynos_gem_obj; +}; + +static int exynos_drm_fb_mmap(struct fb_info *info, +			struct vm_area_struct *vma) +{ +	struct drm_fb_helper *helper = info->par; +	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); +	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; +	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer; +	unsigned long vm_size; +	int ret; + +	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + +	vm_size = vma->vm_end - vma->vm_start; + +	if (vm_size > buffer->size) +		return -EINVAL; + +	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages, +		buffer->dma_addr, buffer->size, &buffer->dma_attrs); +	if (ret < 0) { +		DRM_ERROR("failed to mmap.\n"); +		return ret; +	} + +	return 0; +} + +static struct fb_ops exynos_drm_fb_ops = { +	.owner		= THIS_MODULE, +	.fb_mmap        = exynos_drm_fb_mmap, +	.fb_fillrect	= cfb_fillrect, +	.fb_copyarea	= cfb_copyarea, +	.fb_imageblit	= cfb_imageblit, +	.fb_check_var	= drm_fb_helper_check_var, +	.fb_set_par	= drm_fb_helper_set_par, +	.fb_blank	
= drm_fb_helper_blank, +	.fb_pan_display	= drm_fb_helper_pan_display, +	.fb_setcmap	= drm_fb_helper_setcmap, +}; + +static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, +				     struct drm_framebuffer *fb) +{ +	struct fb_info *fbi = helper->fbdev; +	struct drm_device *dev = helper->dev; +	struct exynos_drm_gem_buf *buffer; +	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); +	unsigned long offset; + +	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); +	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); + +	/* RGB formats use only one buffer */ +	buffer = exynos_drm_fb_buffer(fb, 0); +	if (!buffer) { +		DRM_DEBUG_KMS("buffer is null.\n"); +		return -EFAULT; +	} + +	/* map pages with kernel virtual space. */ +	if (!buffer->kvaddr) { +		if (is_drm_iommu_supported(dev)) { +			unsigned int nr_pages = buffer->size >> PAGE_SHIFT; + +			buffer->kvaddr = (void __iomem *) vmap(buffer->pages, +					nr_pages, VM_MAP, +					pgprot_writecombine(PAGE_KERNEL)); +		} else { +			phys_addr_t dma_addr = buffer->dma_addr; +			if (dma_addr) +				buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr); +			else +				buffer->kvaddr = (void __iomem *)NULL; +		} +		if (!buffer->kvaddr) { +			DRM_ERROR("failed to map pages to kernel space.\n"); +			return -EIO; +		} +	} + +	/* buffer count to framebuffer always is 1 at booting time. 
*/ +	exynos_drm_fb_set_buf_cnt(fb, 1); + +	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); +	offset += fbi->var.yoffset * fb->pitches[0]; + +	fbi->screen_base = buffer->kvaddr + offset; +	fbi->screen_size = size; + +	return 0; +} + +static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, +				    struct drm_fb_helper_surface_size *sizes) +{ +	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper); +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_device *dev = helper->dev; +	struct fb_info *fbi; +	struct drm_mode_fb_cmd2 mode_cmd = { 0 }; +	struct platform_device *pdev = dev->platformdev; +	unsigned long size; +	int ret; + +	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", +			sizes->surface_width, sizes->surface_height, +			sizes->surface_bpp); + +	mode_cmd.width = sizes->surface_width; +	mode_cmd.height = sizes->surface_height; +	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3); +	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, +							  sizes->surface_depth); + +	mutex_lock(&dev->struct_mutex); + +	fbi = framebuffer_alloc(0, &pdev->dev); +	if (!fbi) { +		DRM_ERROR("failed to allocate fb info.\n"); +		ret = -ENOMEM; +		goto out; +	} + +	size = mode_cmd.pitches[0] * mode_cmd.height; + +	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size); +	/* +	 * If physically contiguous memory allocation fails and if IOMMU is +	 * supported then try to get buffer from non physically contiguous +	 * memory area. 
+	 */ +	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) { +		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n"); +		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, +							size); +	} + +	if (IS_ERR(exynos_gem_obj)) { +		ret = PTR_ERR(exynos_gem_obj); +		goto err_release_framebuffer; +	} + +	exynos_fbdev->exynos_gem_obj = exynos_gem_obj; + +	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, +			&exynos_gem_obj->base); +	if (IS_ERR(helper->fb)) { +		DRM_ERROR("failed to create drm framebuffer.\n"); +		ret = PTR_ERR(helper->fb); +		goto err_destroy_gem; +	} + +	helper->fbdev = fbi; + +	fbi->par = helper; +	fbi->flags = FBINFO_FLAG_DEFAULT; +	fbi->fbops = &exynos_drm_fb_ops; + +	ret = fb_alloc_cmap(&fbi->cmap, 256, 0); +	if (ret) { +		DRM_ERROR("failed to allocate cmap.\n"); +		goto err_destroy_framebuffer; +	} + +	ret = exynos_drm_fbdev_update(helper, helper->fb); +	if (ret < 0) +		goto err_dealloc_cmap; + +	mutex_unlock(&dev->struct_mutex); +	return ret; + +err_dealloc_cmap: +	fb_dealloc_cmap(&fbi->cmap); +err_destroy_framebuffer: +	drm_framebuffer_cleanup(helper->fb); +err_destroy_gem: +	exynos_drm_gem_destroy(exynos_gem_obj); +err_release_framebuffer: +	framebuffer_release(fbi); + +/* + * if failed, all resources allocated above would be released by + * drm_mode_config_cleanup() when drm_load() had been called prior + * to any specific driver such as fimd or hdmi driver. 
+ */ +out: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { +	.fb_probe =	exynos_drm_fbdev_create, +}; + +static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev) +{ +	struct drm_connector *connector; +	bool ret = false; + +	mutex_lock(&dev->mode_config.mutex); +	list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +		if (connector->status != connector_status_connected) +			continue; + +		ret = true; +		break; +	} +	mutex_unlock(&dev->mode_config.mutex); + +	return ret; +} + +int exynos_drm_fbdev_init(struct drm_device *dev) +{ +	struct exynos_drm_fbdev *fbdev; +	struct exynos_drm_private *private = dev->dev_private; +	struct drm_fb_helper *helper; +	unsigned int num_crtc; +	int ret; + +	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) +		return 0; + +	if (!exynos_drm_fbdev_is_anything_connected(dev)) +		return 0; + +	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); +	if (!fbdev) +		return -ENOMEM; + +	private->fb_helper = helper = &fbdev->drm_fb_helper; +	helper->funcs = &exynos_drm_fb_helper_funcs; + +	num_crtc = dev->mode_config.num_crtc; + +	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR); +	if (ret < 0) { +		DRM_ERROR("failed to initialize drm fb helper.\n"); +		goto err_init; +	} + +	ret = drm_fb_helper_single_add_all_connectors(helper); +	if (ret < 0) { +		DRM_ERROR("failed to register drm_fb_helper_connector.\n"); +		goto err_setup; + +	} + +	/* disable all the possible outputs/crtcs before entering KMS mode */ +	drm_helper_disable_unused_functions(dev); + +	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); +	if (ret < 0) { +		DRM_ERROR("failed to set up hw configuration.\n"); +		goto err_setup; +	} + +	return 0; + +err_setup: +	drm_fb_helper_fini(helper); + +err_init: +	private->fb_helper = NULL; +	kfree(fbdev); + +	return ret; +} + +static void exynos_drm_fbdev_destroy(struct drm_device *dev, +				      struct 
drm_fb_helper *fb_helper) +{ +	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper); +	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; +	struct drm_framebuffer *fb; + +	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr) +		vunmap(exynos_gem_obj->buffer->kvaddr); + +	/* release drm framebuffer and real buffer */ +	if (fb_helper->fb && fb_helper->fb->funcs) { +		fb = fb_helper->fb; +		if (fb) { +			drm_framebuffer_unregister_private(fb); +			drm_framebuffer_remove(fb); +		} +	} + +	/* release linux framebuffer */ +	if (fb_helper->fbdev) { +		struct fb_info *info; +		int ret; + +		info = fb_helper->fbdev; +		ret = unregister_framebuffer(info); +		if (ret < 0) +			DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); + +		if (info->cmap.len) +			fb_dealloc_cmap(&info->cmap); + +		framebuffer_release(info); +	} + +	drm_fb_helper_fini(fb_helper); +} + +void exynos_drm_fbdev_fini(struct drm_device *dev) +{ +	struct exynos_drm_private *private = dev->dev_private; +	struct exynos_drm_fbdev *fbdev; + +	if (!private || !private->fb_helper) +		return; + +	fbdev = to_exynos_fbdev(private->fb_helper); + +	if (fbdev->exynos_gem_obj) +		exynos_drm_gem_destroy(fbdev->exynos_gem_obj); + +	exynos_drm_fbdev_destroy(dev, private->fb_helper); +	kfree(fbdev); +	private->fb_helper = NULL; +} + +void exynos_drm_fbdev_restore_mode(struct drm_device *dev) +{ +	struct exynos_drm_private *private = dev->dev_private; + +	if (!private || !private->fb_helper) +		return; + +	drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h new file mode 100644 index 00000000000..e16d7f0ae19 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * + * Authors: + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Seung-Woo Kim <sw0312.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_FBDEV_H_ +#define _EXYNOS_DRM_FBDEV_H_ + +int exynos_drm_fbdev_init(struct drm_device *dev); +int exynos_drm_fbdev_reinit(struct drm_device *dev); +void exynos_drm_fbdev_fini(struct drm_device *dev); +void exynos_drm_fbdev_restore_mode(struct drm_device *dev); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c new file mode 100644 index 00000000000..831dde9034c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -0,0 +1,1901 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee <lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/spinlock.h> + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "regs-fimc.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_ipp.h" +#include "exynos_drm_fimc.h" + +/* + * FIMC stands for Fully Interactive Mobile Camera and + * supports image scaler/rotator and input/output DMA operations. + * input DMA reads image data from the memory. 
+ * output DMA writes image data to memory. + * FIMC supports image rotation and image effect functions. + * + * M2M operation : supports crop/scale/rotation/csc so on. + * Memory ----> FIMC H/W ----> Memory. + * Writeback operation : supports cloned screen with FIMD. + * FIMD ----> FIMC H/W ----> Memory. + * Output operation : supports direct display using local path. + * Memory ----> FIMC H/W ----> FIMD. + */ + +/* + * TODO + * 1. check suspend/resume api if needed. + * 2. need to check use case platform_device_id. + * 3. check src/dst size with, height. + * 4. added check_prepare api for right register. + * 5. need to add supported list in prop_list. + * 6. check prescaler/scaler optimization. + */ + +#define FIMC_MAX_DEVS	4 +#define FIMC_MAX_SRC	2 +#define FIMC_MAX_DST	32 +#define FIMC_SHFACTOR	10 +#define FIMC_BUF_STOP	1 +#define FIMC_BUF_START	2 +#define FIMC_WIDTH_ITU_709	1280 +#define FIMC_REFRESH_MAX	60 +#define FIMC_REFRESH_MIN	12 +#define FIMC_CROP_MAX	8192 +#define FIMC_CROP_MIN	32 +#define FIMC_SCALE_MAX	4224 +#define FIMC_SCALE_MIN	32 + +#define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev)) +#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\ +					struct fimc_context, ippdrv); +enum fimc_wb { +	FIMC_WB_NONE, +	FIMC_WB_A, +	FIMC_WB_B, +}; + +enum { +	FIMC_CLK_LCLK, +	FIMC_CLK_GATE, +	FIMC_CLK_WB_A, +	FIMC_CLK_WB_B, +	FIMC_CLK_MUX, +	FIMC_CLK_PARENT, +	FIMC_CLKS_MAX +}; + +static const char * const fimc_clock_names[] = { +	[FIMC_CLK_LCLK]   = "sclk_fimc", +	[FIMC_CLK_GATE]   = "fimc", +	[FIMC_CLK_WB_A]   = "pxl_async0", +	[FIMC_CLK_WB_B]   = "pxl_async1", +	[FIMC_CLK_MUX]    = "mux", +	[FIMC_CLK_PARENT] = "parent", +}; + +#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL + +/* + * A structure of scaler. + * + * @range: narrow, wide. + * @bypass: unused scaler path. + * @up_h: horizontal scale up. + * @up_v: vertical scale up. + * @hratio: horizontal ratio. + * @vratio: vertical ratio. 
+ */ +struct fimc_scaler { +	bool	range; +	bool bypass; +	bool up_h; +	bool up_v; +	u32 hratio; +	u32 vratio; +}; + +/* + * A structure of scaler capability. + * + * find user manual table 43-1. + * @in_hori: scaler input horizontal size. + * @bypass: scaler bypass mode. + * @dst_h_wo_rot: target horizontal size without output rotation. + * @dst_h_rot: target horizontal size with output rotation. + * @rl_w_wo_rot: real width without input rotation. + * @rl_h_rot: real height without output rotation. + */ +struct fimc_capability { +	/* scaler */ +	u32	in_hori; +	u32	bypass; +	/* output rotator */ +	u32	dst_h_wo_rot; +	u32	dst_h_rot; +	/* input rotator */ +	u32	rl_w_wo_rot; +	u32	rl_h_rot; +}; + +/* + * A structure of fimc context. + * + * @ippdrv: prepare initialization using ippdrv. + * @regs_res: register resources. + * @regs: memory mapped io registers. + * @lock: locking of operations. + * @clocks: fimc clocks. + * @clk_frequency: LCLK clock frequency. + * @sysreg: handle to SYSREG block regmap. + * @sc: scaler infomations. + * @pol: porarity of writeback. + * @id: fimc id. + * @irq: irq number. + * @suspended: qos operations. 
+ */ +struct fimc_context { +	struct exynos_drm_ippdrv	ippdrv; +	struct resource	*regs_res; +	void __iomem	*regs; +	spinlock_t	lock; +	struct clk	*clocks[FIMC_CLKS_MAX]; +	u32		clk_frequency; +	struct regmap	*sysreg; +	struct fimc_scaler	sc; +	struct exynos_drm_ipp_pol	pol; +	int	id; +	int	irq; +	bool	suspended; +}; + +static u32 fimc_read(struct fimc_context *ctx, u32 reg) +{ +	return readl(ctx->regs + reg); +} + +static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg) +{ +	writel(val, ctx->regs + reg); +} + +static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits) +{ +	void __iomem *r = ctx->regs + reg; + +	writel(readl(r) | bits, r); +} + +static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits) +{ +	void __iomem *r = ctx->regs + reg; + +	writel(readl(r) & ~bits, r); +} + +static void fimc_sw_reset(struct fimc_context *ctx) +{ +	u32 cfg; + +	/* stop dma operation */ +	cfg = fimc_read(ctx, EXYNOS_CISTATUS); +	if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) +		fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); + +	fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT); + +	/* disable image capture */ +	fimc_clear_bits(ctx, EXYNOS_CIIMGCPT, +		EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN); + +	/* s/w reset */ +	fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST); + +	/* s/w reset complete */ +	fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST); + +	/* reset sequence */ +	fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); +} + +static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) +{ +	return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, +				  SYSREG_FIMD0WB_DEST_MASK, +				  ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); +} + +static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("wb[%d]\n", wb); + +	cfg = fimc_read(ctx, EXYNOS_CIGCTRL); +	cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | +		EXYNOS_CIGCTRL_SELCAM_ITU_MASK | +		
EXYNOS_CIGCTRL_SELCAM_MIPI_MASK | +		EXYNOS_CIGCTRL_SELCAM_FIMC_MASK | +		EXYNOS_CIGCTRL_SELWB_CAMIF_MASK | +		EXYNOS_CIGCTRL_SELWRITEBACK_MASK); + +	switch (wb) { +	case FIMC_WB_A: +		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A | +			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); +		break; +	case FIMC_WB_B: +		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B | +			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); +		break; +	case FIMC_WB_NONE: +	default: +		cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | +			EXYNOS_CIGCTRL_SELWRITEBACK_A | +			EXYNOS_CIGCTRL_SELCAM_MIPI_A | +			EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); +		break; +	} + +	fimc_write(ctx, cfg, EXYNOS_CIGCTRL); +} + +static void fimc_set_polarity(struct fimc_context *ctx, +		struct exynos_drm_ipp_pol *pol) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("inv_pclk[%d]inv_vsync[%d]\n", +		pol->inv_pclk, pol->inv_vsync); +	DRM_DEBUG_KMS("inv_href[%d]inv_hsync[%d]\n", +		pol->inv_href, pol->inv_hsync); + +	cfg = fimc_read(ctx, EXYNOS_CIGCTRL); +	cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC | +		 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC); + +	if (pol->inv_pclk) +		cfg |= EXYNOS_CIGCTRL_INVPOLPCLK; +	if (pol->inv_vsync) +		cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC; +	if (pol->inv_href) +		cfg |= EXYNOS_CIGCTRL_INVPOLHREF; +	if (pol->inv_hsync) +		cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC; + +	fimc_write(ctx, cfg, EXYNOS_CIGCTRL); +} + +static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	cfg = fimc_read(ctx, EXYNOS_CIGCTRL); +	if (enable) +		cfg |= EXYNOS_CIGCTRL_CAM_JPEG; +	else +		cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG; + +	fimc_write(ctx, cfg, EXYNOS_CIGCTRL); +} + +static void fimc_mask_irq(struct fimc_context *ctx, bool enable) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	cfg = fimc_read(ctx, EXYNOS_CIGCTRL); +	if (enable) { +		cfg &= ~EXYNOS_CIGCTRL_IRQ_OVFEN; +		cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE | EXYNOS_CIGCTRL_IRQ_LEVEL; +	} else +		cfg &= ~EXYNOS_CIGCTRL_IRQ_ENABLE; +	
fimc_write(ctx, cfg, EXYNOS_CIGCTRL); +} + +static void fimc_clear_irq(struct fimc_context *ctx) +{ +	fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR); +} + +static bool fimc_check_ovf(struct fimc_context *ctx) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 status, flag; + +	status = fimc_read(ctx, EXYNOS_CISTATUS); +	flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB | +		EXYNOS_CISTATUS_OVFICR; + +	DRM_DEBUG_KMS("flag[0x%x]\n", flag); + +	if (status & flag) { +		fimc_set_bits(ctx, EXYNOS_CIWDOFST, +			EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | +			EXYNOS_CIWDOFST_CLROVFICR); +		fimc_clear_bits(ctx, EXYNOS_CIWDOFST, +			EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | +			EXYNOS_CIWDOFST_CLROVFICR); + +		dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", +			ctx->id, status); +		return true; +	} + +	return false; +} + +static bool fimc_check_frame_end(struct fimc_context *ctx) +{ +	u32 cfg; + +	cfg = fimc_read(ctx, EXYNOS_CISTATUS); + +	DRM_DEBUG_KMS("cfg[0x%x]\n", cfg); + +	if (!(cfg & EXYNOS_CISTATUS_FRAMEEND)) +		return false; + +	cfg &= ~(EXYNOS_CISTATUS_FRAMEEND); +	fimc_write(ctx, cfg, EXYNOS_CISTATUS); + +	return true; +} + +static int fimc_get_buf_id(struct fimc_context *ctx) +{ +	u32 cfg; +	int frame_cnt, buf_id; + +	cfg = fimc_read(ctx, EXYNOS_CISTATUS2); +	frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg); + +	if (frame_cnt == 0) +		frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg); + +	DRM_DEBUG_KMS("present[%d]before[%d]\n", +		EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg), +		EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg)); + +	if (frame_cnt == 0) { +		DRM_ERROR("failed to get frame count.\n"); +		return -EIO; +	} + +	buf_id = frame_cnt - 1; +	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); + +	return buf_id; +} + +static void fimc_handle_lastend(struct fimc_context *ctx, bool enable) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	cfg = fimc_read(ctx, EXYNOS_CIOCTRL); +	if 
(enable) +		cfg |= EXYNOS_CIOCTRL_LASTENDEN; +	else +		cfg &= ~EXYNOS_CIOCTRL_LASTENDEN; + +	fimc_write(ctx, cfg, EXYNOS_CIOCTRL); +} + + +static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	/* RGB */ +	cfg = fimc_read(ctx, EXYNOS_CISCCTRL); +	cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK; + +	switch (fmt) { +	case DRM_FORMAT_RGB565: +		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; +		fimc_write(ctx, cfg, EXYNOS_CISCCTRL); +		return 0; +	case DRM_FORMAT_RGB888: +	case DRM_FORMAT_XRGB8888: +		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888; +		fimc_write(ctx, cfg, EXYNOS_CISCCTRL); +		return 0; +	default: +		/* bypass */ +		break; +	} + +	/* YUV */ +	cfg = fimc_read(ctx, EXYNOS_MSCTRL); +	cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK | +		EXYNOS_MSCTRL_C_INT_IN_2PLANE | +		EXYNOS_MSCTRL_ORDER422_YCBYCR); + +	switch (fmt) { +	case DRM_FORMAT_YUYV: +		cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR; +		break; +	case DRM_FORMAT_YVYU: +		cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB; +		break; +	case DRM_FORMAT_UYVY: +		cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY; +		break; +	case DRM_FORMAT_VYUY: +	case DRM_FORMAT_YUV444: +		cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY; +		break; +	case DRM_FORMAT_NV21: +	case DRM_FORMAT_NV61: +		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB | +			EXYNOS_MSCTRL_C_INT_IN_2PLANE); +		break; +	case DRM_FORMAT_YUV422: +	case DRM_FORMAT_YUV420: +	case DRM_FORMAT_YVU420: +		cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE; +		break; +	case DRM_FORMAT_NV12: +	case DRM_FORMAT_NV12MT: +	case DRM_FORMAT_NV16: +		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | +			EXYNOS_MSCTRL_C_INT_IN_2PLANE); +		break; +	default: +		dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt); +		return -EINVAL; +	} + +	fimc_write(ctx, cfg, EXYNOS_MSCTRL); + +	return 0; +} + +static int fimc_src_set_fmt(struct device *dev, u32 fmt) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv 
*ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	cfg = fimc_read(ctx, EXYNOS_MSCTRL); +	cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB; + +	switch (fmt) { +	case DRM_FORMAT_RGB565: +	case DRM_FORMAT_RGB888: +	case DRM_FORMAT_XRGB8888: +		cfg |= EXYNOS_MSCTRL_INFORMAT_RGB; +		break; +	case DRM_FORMAT_YUV444: +		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; +		break; +	case DRM_FORMAT_YUYV: +	case DRM_FORMAT_YVYU: +	case DRM_FORMAT_UYVY: +	case DRM_FORMAT_VYUY: +		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE; +		break; +	case DRM_FORMAT_NV16: +	case DRM_FORMAT_NV61: +	case DRM_FORMAT_YUV422: +		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422; +		break; +	case DRM_FORMAT_YUV420: +	case DRM_FORMAT_YVU420: +	case DRM_FORMAT_NV12: +	case DRM_FORMAT_NV21: +	case DRM_FORMAT_NV12MT: +		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt); +		return -EINVAL; +	} + +	fimc_write(ctx, cfg, EXYNOS_MSCTRL); + +	cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); +	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; + +	if (fmt == DRM_FORMAT_NV12MT) +		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; +	else +		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; + +	fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); + +	return fimc_src_set_fmt_order(ctx, fmt); +} + +static int fimc_src_set_transf(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg1, cfg2; + +	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + +	cfg1 = fimc_read(ctx, EXYNOS_MSCTRL); +	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | +		EXYNOS_MSCTRL_FLIP_Y_MIRROR); + +	cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT); +	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; + +	switch (degree) { +	case EXYNOS_DRM_DEGREE_0: +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg1 |= 
EXYNOS_MSCTRL_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_90: +		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_180: +		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | +			EXYNOS_MSCTRL_FLIP_Y_MIRROR); +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_270: +		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | +			EXYNOS_MSCTRL_FLIP_Y_MIRROR); +		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); +		return -EINVAL; +	} + +	fimc_write(ctx, cfg1, EXYNOS_MSCTRL); +	fimc_write(ctx, cfg2, EXYNOS_CITRGFMT); +	*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 
1 : 0; + +	return 0; +} + +static int fimc_set_window(struct fimc_context *ctx, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +{ +	u32 cfg, h1, h2, v1, v2; + +	/* cropped image */ +	h1 = pos->x; +	h2 = sz->hsize - pos->w - pos->x; +	v1 = pos->y; +	v2 = sz->vsize - pos->h - pos->y; + +	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", +		pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); +	DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); + +	/* +	 * set window offset 1, 2 size +	 * check figure 43-21 in user manual +	 */ +	cfg = fimc_read(ctx, EXYNOS_CIWDOFST); +	cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK | +		EXYNOS_CIWDOFST_WINVEROFST_MASK); +	cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) | +		EXYNOS_CIWDOFST_WINVEROFST(v1)); +	cfg |= EXYNOS_CIWDOFST_WINOFSEN; +	fimc_write(ctx, cfg, EXYNOS_CIWDOFST); + +	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | +		EXYNOS_CIWDOFST2_WINVEROFST2(v2)); +	fimc_write(ctx, cfg, EXYNOS_CIWDOFST2); + +	return 0; +} + +static int fimc_src_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct drm_exynos_pos img_pos = *pos; +	struct drm_exynos_sz img_sz = *sz; +	u32 cfg; + +	DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", +		swap, sz->hsize, sz->vsize); + +	/* original size */ +	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | +		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); + +	fimc_write(ctx, cfg, EXYNOS_ORGISIZE); + +	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); + +	if (swap) { +		img_pos.w = pos->h; +		img_pos.h = pos->w; +		img_sz.hsize = sz->vsize; +		img_sz.vsize = sz->hsize; +	} + +	/* set input DMA image size */ +	cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE); +	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | +		EXYNOS_CIREAL_ISIZE_WIDTH_MASK); +	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | +		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); +	fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE); + +	/* +	 * 
/*
 * fimc_src_set_size - configure source image geometry.
 * @dev: FIMC platform device
 * @swap: non-zero when width/height must be exchanged (set by the
 *        transform op for 90/270 degree rotation — see fimc_src_set_transf)
 * @pos: source crop rectangle
 * @sz: full source buffer size
 *
 * Programs the original size, real input DMA size, input FIFO format/size
 * and the per-plane (Y/Cb/Cr) offsets, then sets the crop window.
 * Returns the result of fimc_set_window().
 */
static int fimc_src_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
		swap, sz->hsize, sz->vsize);

	/* original size — written with the un-swapped dimensions */
	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));

	fimc_write(ctx, cfg, EXYNOS_ORGISIZE);

	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);

	/* exchange dimensions for rotated transfers */
	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* set input DMA image size */
	cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
	fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE);

	/*
	 * set input FIFO image size
	 * for now, we support only ITU601 8 bit mode
	 */
	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
	fimc_write(ctx, cfg, EXYNOS_CISRCFMT);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIIYOFF);
	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIICBOFF);
	cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIICROFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIICROFF);

	return fimc_set_window(ctx, &img_pos, &img_sz);
}
fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], +				EXYNOS_CIICRSA(buf_id)); +		} +		break; +	case IPP_BUF_DEQUEUE: +		fimc_write(ctx, 0x0, EXYNOS_CIIYSA(buf_id)); +		fimc_write(ctx, 0x0, EXYNOS_CIICBSA(buf_id)); +		fimc_write(ctx, 0x0, EXYNOS_CIICRSA(buf_id)); +		break; +	default: +		/* bypass */ +		break; +	} + +	return 0; +} + +static struct exynos_drm_ipp_ops fimc_src_ops = { +	.set_fmt = fimc_src_set_fmt, +	.set_transf = fimc_src_set_transf, +	.set_size = fimc_src_set_size, +	.set_addr = fimc_src_set_addr, +}; + +static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	/* RGB */ +	cfg = fimc_read(ctx, EXYNOS_CISCCTRL); +	cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK; + +	switch (fmt) { +	case DRM_FORMAT_RGB565: +		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; +		fimc_write(ctx, cfg, EXYNOS_CISCCTRL); +		return 0; +	case DRM_FORMAT_RGB888: +		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; +		fimc_write(ctx, cfg, EXYNOS_CISCCTRL); +		return 0; +	case DRM_FORMAT_XRGB8888: +		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | +			EXYNOS_CISCCTRL_EXTRGB_EXTENSION); +		fimc_write(ctx, cfg, EXYNOS_CISCCTRL); +		break; +	default: +		/* bypass */ +		break; +	} + +	/* YUV */ +	cfg = fimc_read(ctx, EXYNOS_CIOCTRL); +	cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK | +		EXYNOS_CIOCTRL_ORDER422_MASK | +		EXYNOS_CIOCTRL_YCBCR_PLANE_MASK); + +	switch (fmt) { +	case DRM_FORMAT_XRGB8888: +		cfg |= EXYNOS_CIOCTRL_ALPHA_OUT; +		break; +	case DRM_FORMAT_YUYV: +		cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR; +		break; +	case DRM_FORMAT_YVYU: +		cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB; +		break; +	case DRM_FORMAT_UYVY: +		cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY; +		break; +	case DRM_FORMAT_VYUY: +		cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY; +		break; +	case DRM_FORMAT_NV21: +	case DRM_FORMAT_NV61: +		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB; +		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; +		break; +	case 
DRM_FORMAT_YUV422: +	case DRM_FORMAT_YUV420: +	case DRM_FORMAT_YVU420: +		cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE; +		break; +	case DRM_FORMAT_NV12: +	case DRM_FORMAT_NV12MT: +	case DRM_FORMAT_NV16: +		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; +		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); +		return -EINVAL; +	} + +	fimc_write(ctx, cfg, EXYNOS_CIOCTRL); + +	return 0; +} + +static int fimc_dst_set_fmt(struct device *dev, u32 fmt) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	cfg = fimc_read(ctx, EXYNOS_CIEXTEN); + +	if (fmt == DRM_FORMAT_AYUV) { +		cfg |= EXYNOS_CIEXTEN_YUV444_OUT; +		fimc_write(ctx, cfg, EXYNOS_CIEXTEN); +	} else { +		cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT; +		fimc_write(ctx, cfg, EXYNOS_CIEXTEN); + +		cfg = fimc_read(ctx, EXYNOS_CITRGFMT); +		cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK; + +		switch (fmt) { +		case DRM_FORMAT_RGB565: +		case DRM_FORMAT_RGB888: +		case DRM_FORMAT_XRGB8888: +			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB; +			break; +		case DRM_FORMAT_YUYV: +		case DRM_FORMAT_YVYU: +		case DRM_FORMAT_UYVY: +		case DRM_FORMAT_VYUY: +			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE; +			break; +		case DRM_FORMAT_NV16: +		case DRM_FORMAT_NV61: +		case DRM_FORMAT_YUV422: +			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422; +			break; +		case DRM_FORMAT_YUV420: +		case DRM_FORMAT_YVU420: +		case DRM_FORMAT_NV12: +		case DRM_FORMAT_NV12MT: +		case DRM_FORMAT_NV21: +			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; +			break; +		default: +			dev_err(ippdrv->dev, "inavlid target format 0x%x.\n", +				fmt); +			return -EINVAL; +		} + +		fimc_write(ctx, cfg, EXYNOS_CITRGFMT); +	} + +	cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); +	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; + +	if (fmt == DRM_FORMAT_NV12MT) +		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; +	else +		cfg |= 
EXYNOS_CIDMAPARAM_W_MODE_LINEAR; + +	fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); + +	return fimc_dst_set_fmt_order(ctx, fmt); +} + +static int fimc_dst_set_transf(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + +	cfg = fimc_read(ctx, EXYNOS_CITRGFMT); +	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; +	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; + +	switch (degree) { +	case EXYNOS_DRM_DEGREE_0: +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_90: +		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_180: +		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR | +			EXYNOS_CITRGFMT_FLIP_Y_MIRROR); +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; +		break; +	case EXYNOS_DRM_DEGREE_270: +		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE | +			EXYNOS_CITRGFMT_FLIP_X_MIRROR | +			EXYNOS_CITRGFMT_FLIP_Y_MIRROR); +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); +		return -EINVAL; +	} + +	fimc_write(ctx, cfg, EXYNOS_CITRGFMT); +	*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 
1 : 0; + +	return 0; +} + +static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, +		struct drm_exynos_pos *src, struct drm_exynos_pos *dst) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg, cfg_ext, shfactor; +	u32 pre_dst_width, pre_dst_height; +	u32 hfactor, vfactor; +	int ret = 0; +	u32 src_w, src_h, dst_w, dst_h; + +	cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT); +	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) { +		src_w = src->h; +		src_h = src->w; +	} else { +		src_w = src->w; +		src_h = src->h; +	} + +	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) { +		dst_w = dst->h; +		dst_h = dst->w; +	} else { +		dst_w = dst->w; +		dst_h = dst->h; +	} + +	/* fimc_ippdrv_check_property assures that dividers are not null */ +	hfactor = fls(src_w / dst_w / 2); +	if (hfactor > FIMC_SHFACTOR / 2) { +		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); +		return -EINVAL; +	} + +	vfactor = fls(src_h / dst_h / 2); +	if (vfactor > FIMC_SHFACTOR / 2) { +		dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); +		return -EINVAL; +	} + +	pre_dst_width = src_w >> hfactor; +	pre_dst_height = src_h >> vfactor; +	DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n", +		pre_dst_width, pre_dst_height); +	DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor); + +	sc->hratio = (src_w << 14) / (dst_w << hfactor); +	sc->vratio = (src_h << 14) / (dst_h << vfactor); +	sc->up_h = (dst_w >= src_w) ? true : false; +	sc->up_v = (dst_h >= src_h) ? 
true : false; +	DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n", +		sc->hratio, sc->vratio, sc->up_h, sc->up_v); + +	shfactor = FIMC_SHFACTOR - (hfactor + vfactor); +	DRM_DEBUG_KMS("shfactor[%d]\n", shfactor); + +	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) | +		EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) | +		EXYNOS_CISCPRERATIO_PREVERRATIO(1 << vfactor)); +	fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO); + +	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) | +		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height)); +	fimc_write(ctx, cfg, EXYNOS_CISCPREDST); + +	return ret; +} + +static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) +{ +	u32 cfg, cfg_ext; + +	DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n", +		sc->range, sc->bypass, sc->up_h, sc->up_v); +	DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n", +		sc->hratio, sc->vratio); + +	cfg = fimc_read(ctx, EXYNOS_CISCCTRL); +	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS | +		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V | +		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK | +		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK | +		EXYNOS_CISCCTRL_CSCR2Y_WIDE | +		EXYNOS_CISCCTRL_CSCY2R_WIDE); + +	if (sc->range) +		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE | +			EXYNOS_CISCCTRL_CSCY2R_WIDE); +	if (sc->bypass) +		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS; +	if (sc->up_h) +		cfg |= EXYNOS_CISCCTRL_SCALEUP_H; +	if (sc->up_v) +		cfg |= EXYNOS_CISCCTRL_SCALEUP_V; + +	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) | +		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6))); +	fimc_write(ctx, cfg, EXYNOS_CISCCTRL); + +	cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN); +	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK; +	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK; +	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) | +		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio)); +	fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN); +} + +static int fimc_dst_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +{ +	
/*
 * fimc_dst_set_size - configure destination image geometry.
 * @dev: FIMC platform device
 * @swap: non-zero when width/height must be exchanged (90/270 rotation)
 * @pos: destination rectangle
 * @sz: full destination buffer size
 *
 * Programs original output size, CSC standard selection (ITU-709 for
 * wide sources, else ITU-601), target size, target area and per-plane
 * offsets.  Always returns 0.
 */
static int fimc_dst_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n",
		swap, sz->hsize, sz->vsize);

	/* original size */
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));

	fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);

	DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h);

	/* CSC ITU: pick the colorimetry standard from the image width */
	cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;

	if (sz->hsize >= FIMC_WIDTH_ITU_709)
		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
	else
		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;

	fimc_write(ctx, cfg, EXYNOS_CIGCTRL);

	/* exchange dimensions for rotated transfers */
	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* target image size */
	cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
		EXYNOS_CITRGFMT_TARGETV_MASK);
	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
	fimc_write(ctx, cfg, EXYNOS_CITRGFMT);

	/* target area */
	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
	fimc_write(ctx, cfg, EXYNOS_CITAREA);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIOYOFF);
	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIOCBOFF);
	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
	fimc_write(ctx, cfg, EXYNOS_CIOCROFF);

	return 0;
}
/*
 * fimc_dst_set_buf_seq - (de)activate one slot in the capture sequence.
 * @ctx: driver context
 * @buf_id: slot index, translated to a bit in CIFCNTSEQ
 * @buf_type: IPP_BUF_ENQUEUE activates the slot, IPP_BUF_DEQUEUE clears it
 *
 * Runs under ctx->lock with IRQs disabled since CIFCNTSEQ is also
 * consulted from the interrupt path.  The FIMC interrupt is enabled once
 * enough buffers are queued (FIMC_BUF_START) and disabled again when the
 * count drops to FIMC_BUF_STOP.  Returns 0 or -EINVAL for a bad buf_type.
 */
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	bool enable;
	u32 cfg;
	u32 mask = 0x00000001 << buf_id;
	int ret = 0;
	unsigned long flags;

	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type);

	spin_lock_irqsave(&ctx->lock, flags);

	/* mask register set */
	cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ);

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		enable = true;
		break;
	case IPP_BUF_DEQUEUE:
		enable = false;
		break;
	default:
		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* sequence id */
	cfg &= ~mask;
	cfg |= (enable << buf_id);
	fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ);

	/* interrupt enable */
	if (buf_type == IPP_BUF_ENQUEUE &&
	    fimc_dst_get_buf_count(ctx) >= FIMC_BUF_START)
		fimc_mask_irq(ctx, true);

	/* interrupt disable */
	if (buf_type == IPP_BUF_DEQUEUE &&
	    fimc_dst_get_buf_count(ctx) <= FIMC_BUF_STOP)
		fimc_mask_irq(ctx, false);

err_unlock:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
+			EXYNOS_CIOYSA(buf_id)); + +		if (config->fmt == DRM_FORMAT_YVU420) { +			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], +				EXYNOS_CIOCBSA(buf_id)); +			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], +				EXYNOS_CIOCRSA(buf_id)); +		} else { +			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], +				EXYNOS_CIOCBSA(buf_id)); +			fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], +				EXYNOS_CIOCRSA(buf_id)); +		} +		break; +	case IPP_BUF_DEQUEUE: +		fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id)); +		fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id)); +		fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id)); +		break; +	default: +		/* bypass */ +		break; +	} + +	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type); +} + +static struct exynos_drm_ipp_ops fimc_dst_ops = { +	.set_fmt = fimc_dst_set_fmt, +	.set_transf = fimc_dst_set_transf, +	.set_size = fimc_dst_set_size, +	.set_addr = fimc_dst_set_addr, +}; + +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	if (enable) { +		clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); +		clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); +		ctx->suspended = false; +	} else { +		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); +		clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); +		ctx->suspended = true; +	} + +	return 0; +} + +static irqreturn_t fimc_irq_handler(int irq, void *dev_id) +{ +	struct fimc_context *ctx = dev_id; +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_event_work *event_work = +		c_node->event_work; +	int buf_id; + +	DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id); + +	fimc_clear_irq(ctx); +	if (fimc_check_ovf(ctx)) +		return IRQ_NONE; + +	if (!fimc_check_frame_end(ctx)) +		return IRQ_NONE; + +	buf_id = fimc_get_buf_id(ctx); +	if (buf_id < 0) +		return IRQ_HANDLED; + +	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); + +	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 
/*
 * fimc_irq_handler - threaded IRQ handler for frame-end events.
 * @irq: interrupt number (unused)
 * @dev_id: the fimc_context registered with the IRQ
 *
 * Clears the interrupt, bails out on overflow or when no frame has
 * finished, then dequeues the completed buffer slot and hands the event
 * to the IPP event workqueue.
 *
 * NOTE(review): ippdrv->c_node and c_node->event_work are dereferenced
 * without NULL checks, unlike the set_addr paths — confirm the IRQ can
 * never fire without an active command node.
 */
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
	struct fimc_context *ctx = dev_id;
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_event_work *event_work =
		c_node->event_work;
	int buf_id;

	DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);

	fimc_clear_irq(ctx);
	if (fimc_check_ovf(ctx))
		return IRQ_NONE;

	if (!fimc_check_frame_end(ctx))
		return IRQ_NONE;

	buf_id = fimc_get_buf_id(ctx);
	if (buf_id < 0)
		return IRQ_HANDLED;

	DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);

	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
		DRM_ERROR("failed to dequeue.\n");
		return IRQ_HANDLED;
	}

	/* defer completion handling to the IPP event workqueue */
	event_work->ippdrv = ippdrv;
	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
	queue_work(ippdrv->event_workq, (struct work_struct *)event_work);

	return IRQ_HANDLED;
}
/*
 * fimc_ippdrv_check_property - validate a requested IPP property.
 * @dev: FIMC platform device
 * @property: per-operation (src/dst) configuration to validate
 *
 * Checks flip, degree, buffer bounds and the crop (source) / scale
 * (destination) limits from the driver's prop_list.  For writeback
 * commands the source side is skipped since it comes from FIMD, not
 * memory.  When a 90/270 rotation is requested, width/height limits are
 * compared against the swapped axes.  Returns 0 if valid, -EINVAL
 * otherwise (after dumping both configs).
 */
static int fimc_ippdrv_check_property(struct device *dev,
		struct drm_exynos_ipp_property *property)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos *pos;
	struct drm_exynos_sz *sz;
	bool swap;
	int i;

	for_each_ipp_ops(i) {
		/* writeback input comes from FIMD, not a source buffer */
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		/* check for flip */
		if (!fimc_check_drm_flip(config->flip)) {
			DRM_ERROR("invalid flip.\n");
			goto err_property;
		}

		/* check for degree */
		switch (config->degree) {
		case EXYNOS_DRM_DEGREE_90:
		case EXYNOS_DRM_DEGREE_270:
			swap = true;
			break;
		case EXYNOS_DRM_DEGREE_0:
		case EXYNOS_DRM_DEGREE_180:
			swap = false;
			break;
		default:
			DRM_ERROR("invalid degree.\n");
			goto err_property;
		}

		/* check for buffer bound */
		if ((pos->x + pos->w > sz->hsize) ||
			(pos->y + pos->h > sz->vsize)) {
			DRM_ERROR("out of buf bound.\n");
			goto err_property;
		}

		/* check for crop */
		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
			if (swap) {
				if ((pos->h < pp->crop_min.hsize) ||
					(sz->vsize > pp->crop_max.hsize) ||
					(pos->w < pp->crop_min.vsize) ||
					(sz->hsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->crop_min.hsize) ||
					(sz->hsize > pp->crop_max.hsize) ||
					(pos->h < pp->crop_min.vsize) ||
					(sz->vsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			}
		}

		/* check for scale */
		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
			if (swap) {
				if ((pos->h < pp->scale_min.hsize) ||
					(sz->vsize > pp->scale_max.hsize) ||
					(pos->w < pp->scale_min.vsize) ||
					(sz->hsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->scale_min.hsize) ||
					(sz->hsize > pp->scale_max.hsize) ||
					(pos->h < pp->scale_min.vsize) ||
					(sz->vsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			}
		}
	}

	return 0;

err_property:
	/* dump both configurations to aid debugging the rejected request */
	for_each_ipp_ops(i) {
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
			i ? "dst" : "src", config->flip, config->degree,
			pos->x, pos->y, pos->w, pos->h,
			sz->hsize, sz->vsize);
	}

	return -EINVAL;
}
+err_property: +	for_each_ipp_ops(i) { +		if ((i == EXYNOS_DRM_OPS_SRC) && +			(property->cmd == IPP_CMD_WB)) +			continue; + +		config = &property->config[i]; +		pos = &config->pos; +		sz = &config->sz; + +		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", +			i ? "dst" : "src", config->flip, config->degree, +			pos->x, pos->y, pos->w, pos->h, +			sz->hsize, sz->vsize); +	} + +	return -EINVAL; +} + +static void fimc_clear_addr(struct fimc_context *ctx) +{ +	int i; + +	for (i = 0; i < FIMC_MAX_SRC; i++) { +		fimc_write(ctx, 0, EXYNOS_CIIYSA(i)); +		fimc_write(ctx, 0, EXYNOS_CIICBSA(i)); +		fimc_write(ctx, 0, EXYNOS_CIICRSA(i)); +	} + +	for (i = 0; i < FIMC_MAX_DST; i++) { +		fimc_write(ctx, 0, EXYNOS_CIOYSA(i)); +		fimc_write(ctx, 0, EXYNOS_CIOCBSA(i)); +		fimc_write(ctx, 0, EXYNOS_CIOCRSA(i)); +	} +} + +static int fimc_ippdrv_reset(struct device *dev) +{ +	struct fimc_context *ctx = get_fimc_context(dev); + +	/* reset h/w block */ +	fimc_sw_reset(ctx); + +	/* reset scaler capability */ +	memset(&ctx->sc, 0x0, sizeof(ctx->sc)); + +	fimc_clear_addr(ctx); + +	return 0; +} + +static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_property *property; +	struct drm_exynos_ipp_config *config; +	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX]; +	struct drm_exynos_ipp_set_wb set_wb; +	int ret, i; +	u32 cfg0, cfg1; + +	DRM_DEBUG_KMS("cmd[%d]\n", cmd); + +	if (!c_node) { +		DRM_ERROR("failed to get c_node.\n"); +		return -EINVAL; +	} + +	property = &c_node->property; + +	fimc_mask_irq(ctx, true); + +	for_each_ipp_ops(i) { +		config = &property->config[i]; +		img_pos[i] = config->pos; +	} + +	ret = fimc_set_prescaler(ctx, &ctx->sc, +		&img_pos[EXYNOS_DRM_OPS_SRC], +		&img_pos[EXYNOS_DRM_OPS_DST]); +	if (ret) { +		dev_err(dev, "failed to set precalser.\n"); +		
return ret; +	} + +	/* If set ture, we can save jpeg about screen */ +	fimc_handle_jpeg(ctx, false); +	fimc_set_scaler(ctx, &ctx->sc); +	fimc_set_polarity(ctx, &ctx->pol); + +	switch (cmd) { +	case IPP_CMD_M2M: +		fimc_set_type_ctrl(ctx, FIMC_WB_NONE); +		fimc_handle_lastend(ctx, false); + +		/* setup dma */ +		cfg0 = fimc_read(ctx, EXYNOS_MSCTRL); +		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; +		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; +		fimc_write(ctx, cfg0, EXYNOS_MSCTRL); +		break; +	case IPP_CMD_WB: +		fimc_set_type_ctrl(ctx, FIMC_WB_A); +		fimc_handle_lastend(ctx, true); + +		/* setup FIMD */ +		ret = fimc_set_camblk_fimd0_wb(ctx); +		if (ret < 0) { +			dev_err(dev, "camblk setup failed.\n"); +			return ret; +		} + +		set_wb.enable = 1; +		set_wb.refresh = property->refresh_rate; +		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); +		break; +	case IPP_CMD_OUTPUT: +	default: +		ret = -EINVAL; +		dev_err(dev, "invalid operations.\n"); +		return ret; +	} + +	/* Reset status */ +	fimc_write(ctx, 0x0, EXYNOS_CISTATUS); + +	cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT); +	cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC; +	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC; + +	/* Scaler */ +	cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL); +	cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK; +	cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE | +		EXYNOS_CISCCTRL_SCALERSTART); + +	fimc_write(ctx, cfg1, EXYNOS_CISCCTRL); + +	/* Enable image capture*/ +	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN; +	fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT); + +	/* Disable frame end irq */ +	fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE); + +	fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); + +	if (cmd == IPP_CMD_M2M) { +		fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); + +		fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); +	} + +	return 0; +} + +static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) +{ +	struct fimc_context *ctx = get_fimc_context(dev); +	struct drm_exynos_ipp_set_wb 
/*
 * fimc_ippdrv_stop - stop a running IPP operation.
 * @dev: FIMC platform device
 * @cmd: the command type that was started
 *
 * Tears down the input path for the given command, masks the interrupt,
 * clears the buffer sequence and disables scaler and image capture.
 */
static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	/* zeroed set_wb tells the FIMD notifier to disable writeback */
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("cmd[%d]\n", cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* Source clear */
		cfg = fimc_read(ctx, EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(ctx, cfg, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operations.\n");
		break;
	}

	fimc_mask_irq(ctx, false);

	/* reset sequence */
	fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ);

	/* Scaler disable */
	fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART);

	/* Disable image capture */
	fimc_clear_bits(ctx, EXYNOS_CIIMGCPT,
		EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);

	/*
	 * Enable frame end irq
	 * NOTE(review): setting a bit named IRQ_END_DISABLE under an
	 * "enable" comment (and vice versa in the start path) looks
	 * inverted — confirm the bit polarity against the TRM.
	 */
	fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
}
"failed to set parent.\n"); +			goto e_clk_free; +		} +	} + +	ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency); +	if (ret < 0) +		goto e_clk_free; + +	ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); +	if (!ret) +		return ret; +e_clk_free: +	fimc_put_clocks(ctx); +	return ret; +} + +static int fimc_parse_dt(struct fimc_context *ctx) +{ +	struct device_node *node = ctx->ippdrv.dev->of_node; + +	/* Handle only devices that support the LCD Writeback data path */ +	if (!of_property_read_bool(node, "samsung,lcd-wb")) +		return -ENODEV; + +	if (of_property_read_u32(node, "clock-frequency", +					&ctx->clk_frequency)) +		ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; + +	ctx->id = of_alias_get_id(node, "fimc"); + +	if (ctx->id < 0) { +		dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); +		return -EINVAL; +	} + +	return 0; +} + +static int fimc_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct fimc_context *ctx; +	struct resource *res; +	struct exynos_drm_ippdrv *ippdrv; +	int ret; + +	if (!dev->of_node) { +		dev_err(dev, "device tree node not found.\n"); +		return -ENODEV; +	} + +	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) +		return -ENOMEM; + +	ctx->ippdrv.dev = dev; + +	ret = fimc_parse_dt(ctx); +	if (ret < 0) +		return ret; + +	ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, +						"samsung,sysreg"); +	if (IS_ERR(ctx->sysreg)) { +		dev_err(dev, "syscon regmap lookup failed.\n"); +		return PTR_ERR(ctx->sysreg); +	} + +	/* resource memory */ +	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); +	if (IS_ERR(ctx->regs)) +		return PTR_ERR(ctx->regs); + +	/* resource irq */ +	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); +	if (!res) { +		dev_err(dev, "failed to request irq resource.\n"); +		return -ENOENT; +	} + +	ctx->irq = res->start; +	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, 
fimc_irq_handler, +		IRQF_ONESHOT, "drm_fimc", ctx); +	if (ret < 0) { +		dev_err(dev, "failed to request irq.\n"); +		return ret; +	} + +	ret = fimc_setup_clocks(ctx); +	if (ret < 0) +		return ret; + +	ippdrv = &ctx->ippdrv; +	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; +	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; +	ippdrv->check_property = fimc_ippdrv_check_property; +	ippdrv->reset = fimc_ippdrv_reset; +	ippdrv->start = fimc_ippdrv_start; +	ippdrv->stop = fimc_ippdrv_stop; +	ret = fimc_init_prop_list(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to init property list.\n"); +		goto err_put_clk; +	} + +	DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); + +	spin_lock_init(&ctx->lock); +	platform_set_drvdata(pdev, ctx); + +	pm_runtime_set_active(dev); +	pm_runtime_enable(dev); + +	ret = exynos_drm_ippdrv_register(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to register drm fimc device.\n"); +		goto err_pm_dis; +	} + +	dev_info(dev, "drm fimc registered successfully.\n"); + +	return 0; + +err_pm_dis: +	pm_runtime_disable(dev); +err_put_clk: +	fimc_put_clocks(ctx); + +	return ret; +} + +static int fimc_remove(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct fimc_context *ctx = get_fimc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + +	exynos_drm_ippdrv_unregister(ippdrv); + +	fimc_put_clocks(ctx); +	pm_runtime_set_suspended(dev); +	pm_runtime_disable(dev); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int fimc_suspend(struct device *dev) +{ +	struct fimc_context *ctx = get_fimc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	return fimc_clk_ctrl(ctx, false); +} + +static int fimc_resume(struct device *dev) +{ +	struct fimc_context *ctx = get_fimc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	if (!pm_runtime_suspended(dev)) +		return fimc_clk_ctrl(ctx, true); + +	return 0; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static 
int fimc_runtime_suspend(struct device *dev) +{ +	struct fimc_context *ctx = get_fimc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	return  fimc_clk_ctrl(ctx, false); +} + +static int fimc_runtime_resume(struct device *dev) +{ +	struct fimc_context *ctx = get_fimc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	return  fimc_clk_ctrl(ctx, true); +} +#endif + +static const struct dev_pm_ops fimc_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume) +	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) +}; + +static const struct of_device_id fimc_of_match[] = { +	{ .compatible = "samsung,exynos4210-fimc" }, +	{ .compatible = "samsung,exynos4212-fimc" }, +	{ }, +}; + +struct platform_driver fimc_driver = { +	.probe		= fimc_probe, +	.remove		= fimc_remove, +	.driver		= { +		.of_match_table = fimc_of_match, +		.name	= "exynos-drm-fimc", +		.owner	= THIS_MODULE, +		.pm	= &fimc_pm_ops, +	}, +}; + diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h new file mode 100644 index 00000000000..127a424c5fd --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee <lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_FIMC_H_ +#define _EXYNOS_DRM_FIMC_H_ + +/* + * TODO + * FIMD output interface notifier callback. 
+ */ + +#endif /* _EXYNOS_DRM_FIMC_H_ */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c new file mode 100644 index 00000000000..33161ad3820 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -0,0 +1,1035 @@ +/* exynos_drm_fimd.c + * + * Copyright (C) 2011 Samsung Electronics Co.Ltd + * Authors: + *	Joonyoung Shim <jy0922.shim@samsung.com> + *	Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ +#include <drm/drmP.h> + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/component.h> + +#include <video/of_display_timing.h> +#include <video/of_videomode.h> +#include <video/samsung_fimd.h> +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_fbdev.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_iommu.h" + +/* + * FIMD stands for Fully Interactive Mobile Display and + * as a display controller, it transfers contents drawn on memory + * to a LCD Panel through Display Interfaces such as RGB or + * CPU Interface. + */ + +#define FIMD_DEFAULT_FRAMERATE 60 +#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 + +/* position control register for hardware window 0, 2 ~ 4.*/ +#define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16) +#define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16) +/* + * size control register for hardware windows 0 and alpha control register + * for hardware windows 1 ~ 4 + */ +#define VIDOSD_C(win)		(VIDOSD_BASE + 0x08 + (win) * 16) +/* size control register for hardware windows 1 ~ 2. 
*/ +#define VIDOSD_D(win)		(VIDOSD_BASE + 0x0C + (win) * 16) + +#define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8) +#define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8) +#define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4) + +/* color key control register for hardware window 1 ~ 4. */ +#define WKEYCON0_BASE(x)		((WKEYCON0 + 0x140) + ((x - 1) * 8)) +/* color key value register for hardware window 1 ~ 4. */ +#define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + ((x - 1) * 8)) + +/* FIMD has totally five hardware windows. */ +#define WINDOWS_NR	5 + +#define get_fimd_manager(mgr)	platform_get_drvdata(to_platform_device(dev)) + +struct fimd_driver_data { +	unsigned int timing_base; + +	unsigned int has_shadowcon:1; +	unsigned int has_clksel:1; +	unsigned int has_limited_fmt:1; +}; + +static struct fimd_driver_data s3c64xx_fimd_driver_data = { +	.timing_base = 0x0, +	.has_clksel = 1, +	.has_limited_fmt = 1, +}; + +static struct fimd_driver_data exynos4_fimd_driver_data = { +	.timing_base = 0x0, +	.has_shadowcon = 1, +}; + +static struct fimd_driver_data exynos5_fimd_driver_data = { +	.timing_base = 0x20000, +	.has_shadowcon = 1, +}; + +struct fimd_win_data { +	unsigned int		offset_x; +	unsigned int		offset_y; +	unsigned int		ovl_width; +	unsigned int		ovl_height; +	unsigned int		fb_width; +	unsigned int		fb_height; +	unsigned int		bpp; +	unsigned int		pixel_format; +	dma_addr_t		dma_addr; +	unsigned int		buf_offsize; +	unsigned int		line_size;	/* bytes */ +	bool			enabled; +	bool			resume; +}; + +struct fimd_context { +	struct device			*dev; +	struct drm_device		*drm_dev; +	struct clk			*bus_clk; +	struct clk			*lcd_clk; +	void __iomem			*regs; +	struct drm_display_mode		mode; +	struct fimd_win_data		win_data[WINDOWS_NR]; +	unsigned int			default_win; +	unsigned long			irq_flags; +	u32				vidcon1; +	bool				suspended; +	int				pipe; +	wait_queue_head_t		wait_vsync_queue; +	atomic_t			wait_vsync_event; + +	struct exynos_drm_panel_info 
panel; +	struct fimd_driver_data *driver_data; +	struct exynos_drm_display *display; +}; + +static const struct of_device_id fimd_driver_dt_match[] = { +	{ .compatible = "samsung,s3c6400-fimd", +	  .data = &s3c64xx_fimd_driver_data }, +	{ .compatible = "samsung,exynos4210-fimd", +	  .data = &exynos4_fimd_driver_data }, +	{ .compatible = "samsung,exynos5250-fimd", +	  .data = &exynos5_fimd_driver_data }, +	{}, +}; + +static inline struct fimd_driver_data *drm_fimd_get_driver_data( +	struct platform_device *pdev) +{ +	const struct of_device_id *of_id = +			of_match_device(fimd_driver_dt_match, &pdev->dev); + +	return (struct fimd_driver_data *)of_id->data; +} + +static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; + +	if (ctx->suspended) +		return; + +	atomic_set(&ctx->wait_vsync_event, 1); + +	/* +	 * wait for FIMD to signal VSYNC interrupt or return after +	 * timeout which is set to 50ms (refresh rate of 20). +	 */ +	if (!wait_event_timeout(ctx->wait_vsync_queue, +				!atomic_read(&ctx->wait_vsync_event), +				HZ/20)) +		DRM_DEBUG_KMS("vblank wait timed out.\n"); +} + + +static void fimd_clear_channel(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	int win, ch_enabled = 0; + +	DRM_DEBUG_KMS("%s\n", __FILE__); + +	/* Check if any channel is enabled. 
*/ +	for (win = 0; win < WINDOWS_NR; win++) { +		u32 val = readl(ctx->regs + SHADOWCON); +		if (val & SHADOWCON_CHx_ENABLE(win)) { +			val &= ~SHADOWCON_CHx_ENABLE(win); +			writel(val, ctx->regs + SHADOWCON); +			ch_enabled = 1; +		} +	} + +	/* Wait for vsync, as disable channel takes effect at next vsync */ +	if (ch_enabled) +		fimd_wait_for_vblank(mgr); +} + +static int fimd_mgr_initialize(struct exynos_drm_manager *mgr, +			struct drm_device *drm_dev) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct exynos_drm_private *priv; +	priv = drm_dev->dev_private; + +	mgr->drm_dev = ctx->drm_dev = drm_dev; +	mgr->pipe = ctx->pipe = priv->pipe++; + +	/* +	 * enable drm irq mode. +	 * - with irq_enabled = true, we can use the vblank feature. +	 * +	 * P.S. note that we wouldn't use drm irq handler but +	 *	just specific driver own one instead because +	 *	drm framework supports only one irq handler. +	 */ +	drm_dev->irq_enabled = true; + +	/* +	 * with vblank_disable_allowed = true, vblank interrupt will be disabled +	 * by drm timer once a current process gives up ownership of +	 * vblank event.(after drm_vblank_put function is called) +	 */ +	drm_dev->vblank_disable_allowed = true; + +	/* attach this sub driver to iommu mapping if supported. */ +	if (is_drm_iommu_supported(ctx->drm_dev)) { +		/* +		 * If any channel is already active, iommu will throw +		 * a PAGE FAULT when enabled. So clear any channel if enabled. +		 */ +		fimd_clear_channel(mgr); +		drm_iommu_attach_device(ctx->drm_dev, ctx->dev); +	} + +	return 0; +} + +static void fimd_mgr_remove(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; + +	/* detach this sub driver from iommu mapping if supported. 
*/ +	if (is_drm_iommu_supported(ctx->drm_dev)) +		drm_iommu_detach_device(ctx->drm_dev, ctx->dev); +} + +static u32 fimd_calc_clkdiv(struct fimd_context *ctx, +		const struct drm_display_mode *mode) +{ +	unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh; +	u32 clkdiv; + +	/* Find the clock divider value that gets us closest to ideal_clk */ +	clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); + +	return (clkdiv < 0x100) ? clkdiv : 0xff; +} + +static bool fimd_mode_fixup(struct exynos_drm_manager *mgr, +		const struct drm_display_mode *mode, +		struct drm_display_mode *adjusted_mode) +{ +	if (adjusted_mode->vrefresh == 0) +		adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; + +	return true; +} + +static void fimd_mode_set(struct exynos_drm_manager *mgr, +		const struct drm_display_mode *in_mode) +{ +	struct fimd_context *ctx = mgr->ctx; + +	drm_mode_copy(&ctx->mode, in_mode); +} + +static void fimd_commit(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct drm_display_mode *mode = &ctx->mode; +	struct fimd_driver_data *driver_data; +	u32 val, clkdiv, vidcon1; +	int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd; + +	driver_data = ctx->driver_data; +	if (ctx->suspended) +		return; + +	/* nothing to do if we haven't set the mode yet */ +	if (mode->htotal == 0 || mode->vtotal == 0) +		return; + +	/* setup polarity values */ +	vidcon1 = ctx->vidcon1; +	if (mode->flags & DRM_MODE_FLAG_NVSYNC) +		vidcon1 |= VIDCON1_INV_VSYNC; +	if (mode->flags & DRM_MODE_FLAG_NHSYNC) +		vidcon1 |= VIDCON1_INV_HSYNC; +	writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); + +	/* setup vertical timing values. 
*/ +	vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; +	vbpd = mode->crtc_vtotal - mode->crtc_vsync_end; +	vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay; + +	val = VIDTCON0_VBPD(vbpd - 1) | +		VIDTCON0_VFPD(vfpd - 1) | +		VIDTCON0_VSPW(vsync_len - 1); +	writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); + +	/* setup horizontal timing values.  */ +	hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; +	hbpd = mode->crtc_htotal - mode->crtc_hsync_end; +	hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay; + +	val = VIDTCON1_HBPD(hbpd - 1) | +		VIDTCON1_HFPD(hfpd - 1) | +		VIDTCON1_HSPW(hsync_len - 1); +	writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); + +	/* setup horizontal and vertical display size. */ +	val = VIDTCON2_LINEVAL(mode->vdisplay - 1) | +	       VIDTCON2_HOZVAL(mode->hdisplay - 1) | +	       VIDTCON2_LINEVAL_E(mode->vdisplay - 1) | +	       VIDTCON2_HOZVAL_E(mode->hdisplay - 1); +	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); + +	/* +	 * fields of register with prefix '_F' would be updated +	 * at vsync(same as dma start) +	 */ +	val = VIDCON0_ENVID | VIDCON0_ENVID_F; + +	if (ctx->driver_data->has_clksel) +		val |= VIDCON0_CLKSEL_LCD; + +	clkdiv = fimd_calc_clkdiv(ctx, mode); +	if (clkdiv > 1) +		val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR; + +	writel(val, ctx->regs + VIDCON0); +} + +static int fimd_enable_vblank(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	u32 val; + +	if (ctx->suspended) +		return -EPERM; + +	if (!test_and_set_bit(0, &ctx->irq_flags)) { +		val = readl(ctx->regs + VIDINTCON0); + +		val |= VIDINTCON0_INT_ENABLE; +		val |= VIDINTCON0_INT_FRAME; + +		val &= ~VIDINTCON0_FRAMESEL0_MASK; +		val |= VIDINTCON0_FRAMESEL0_VSYNC; +		val &= ~VIDINTCON0_FRAMESEL1_MASK; +		val |= VIDINTCON0_FRAMESEL1_NONE; + +		writel(val, ctx->regs + VIDINTCON0); +	} + +	return 0; +} + +static void fimd_disable_vblank(struct exynos_drm_manager *mgr) +{ +	struct 
fimd_context *ctx = mgr->ctx; +	u32 val; + +	if (ctx->suspended) +		return; + +	if (test_and_clear_bit(0, &ctx->irq_flags)) { +		val = readl(ctx->regs + VIDINTCON0); + +		val &= ~VIDINTCON0_INT_FRAME; +		val &= ~VIDINTCON0_INT_ENABLE; + +		writel(val, ctx->regs + VIDINTCON0); +	} +} + +static void fimd_win_mode_set(struct exynos_drm_manager *mgr, +			struct exynos_drm_overlay *overlay) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct fimd_win_data *win_data; +	int win; +	unsigned long offset; + +	if (!overlay) { +		DRM_ERROR("overlay is NULL\n"); +		return; +	} + +	win = overlay->zpos; +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	offset = overlay->fb_x * (overlay->bpp >> 3); +	offset += overlay->fb_y * overlay->pitch; + +	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); + +	win_data = &ctx->win_data[win]; + +	win_data->offset_x = overlay->crtc_x; +	win_data->offset_y = overlay->crtc_y; +	win_data->ovl_width = overlay->crtc_width; +	win_data->ovl_height = overlay->crtc_height; +	win_data->fb_width = overlay->fb_width; +	win_data->fb_height = overlay->fb_height; +	win_data->dma_addr = overlay->dma_addr[0] + offset; +	win_data->bpp = overlay->bpp; +	win_data->pixel_format = overlay->pixel_format; +	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * +				(overlay->bpp >> 3); +	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); + +	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", +			win_data->offset_x, win_data->offset_y); +	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", +			win_data->ovl_width, win_data->ovl_height); +	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr); +	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", +			overlay->fb_width, overlay->crtc_width); +} + +static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) +{ +	struct fimd_win_data *win_data = &ctx->win_data[win]; +	unsigned long val; + +	val = 
WINCONx_ENWIN; + +	/* +	 * In case of s3c64xx, window 0 doesn't support alpha channel. +	 * So the request format is ARGB8888 then change it to XRGB8888. +	 */ +	if (ctx->driver_data->has_limited_fmt && !win) { +		if (win_data->pixel_format == DRM_FORMAT_ARGB8888) +			win_data->pixel_format = DRM_FORMAT_XRGB8888; +	} + +	switch (win_data->pixel_format) { +	case DRM_FORMAT_C8: +		val |= WINCON0_BPPMODE_8BPP_PALETTE; +		val |= WINCONx_BURSTLEN_8WORD; +		val |= WINCONx_BYTSWP; +		break; +	case DRM_FORMAT_XRGB1555: +		val |= WINCON0_BPPMODE_16BPP_1555; +		val |= WINCONx_HAWSWP; +		val |= WINCONx_BURSTLEN_16WORD; +		break; +	case DRM_FORMAT_RGB565: +		val |= WINCON0_BPPMODE_16BPP_565; +		val |= WINCONx_HAWSWP; +		val |= WINCONx_BURSTLEN_16WORD; +		break; +	case DRM_FORMAT_XRGB8888: +		val |= WINCON0_BPPMODE_24BPP_888; +		val |= WINCONx_WSWP; +		val |= WINCONx_BURSTLEN_16WORD; +		break; +	case DRM_FORMAT_ARGB8888: +		val |= WINCON1_BPPMODE_25BPP_A1888 +			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; +		val |= WINCONx_WSWP; +		val |= WINCONx_BURSTLEN_16WORD; +		break; +	default: +		DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n"); + +		val |= WINCON0_BPPMODE_24BPP_888; +		val |= WINCONx_WSWP; +		val |= WINCONx_BURSTLEN_16WORD; +		break; +	} + +	DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp); + +	/* +	 * In case of exynos, setting dma-burst to 16Word causes permanent +	 * tearing for very small buffers, e.g. cursor buffer. Burst Mode +	 * switching which is based on overlay size is not recommended as +	 * overlay size varies alot towards the end of the screen and rapid +	 * movement causes unstable DMA which results into iommu crash/tear. 
+	 */ + +	if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { +		val &= ~WINCONx_BURSTLEN_MASK; +		val |= WINCONx_BURSTLEN_4WORD; +	} + +	writel(val, ctx->regs + WINCON(win)); +} + +static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win) +{ +	unsigned int keycon0 = 0, keycon1 = 0; + +	keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | +			WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0); + +	keycon1 = WxKEYCON1_COLVAL(0xffffffff); + +	writel(keycon0, ctx->regs + WKEYCON0_BASE(win)); +	writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); +} + +/** + * shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @win: window to protect registers for + * @protect: 1 to protect (disable updates) + */ +static void fimd_shadow_protect_win(struct fimd_context *ctx, +							int win, bool protect) +{ +	u32 reg, bits, val; + +	if (ctx->driver_data->has_shadowcon) { +		reg = SHADOWCON; +		bits = SHADOWCON_WINx_PROTECT(win); +	} else { +		reg = PRTCON; +		bits = PRTCON_PROTECT; +	} + +	val = readl(ctx->regs + reg); +	if (protect) +		val |= bits; +	else +		val &= ~bits; +	writel(val, ctx->regs + reg); +} + +static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct fimd_win_data *win_data; +	int win = zpos; +	unsigned long val, alpha, size; +	unsigned int last_x; +	unsigned int last_y; + +	if (ctx->suspended) +		return; + +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	win_data = &ctx->win_data[win]; + +	/* If suspended, enable this on resume */ +	if (ctx->suspended) { +		win_data->resume = true; +		return; +	} + +	/* +	 * SHADOWCON/PRTCON register is used for enabling timing. 
+	 * +	 * for example, once only width value of a register is set, +	 * if the dma is started then fimd hardware could malfunction so +	 * with protect window setting, the register fields with prefix '_F' +	 * wouldn't be updated at vsync also but updated once unprotect window +	 * is set. +	 */ + +	/* protect windows */ +	fimd_shadow_protect_win(ctx, win, true); + +	/* buffer start address */ +	val = (unsigned long)win_data->dma_addr; +	writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); + +	/* buffer end address */ +	size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); +	val = (unsigned long)(win_data->dma_addr + size); +	writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); + +	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", +			(unsigned long)win_data->dma_addr, val, size); +	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", +			win_data->ovl_width, win_data->ovl_height); + +	/* buffer size */ +	val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) | +		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) | +		VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) | +		VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size); +	writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); + +	/* OSD position */ +	val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) | +		VIDOSDxA_TOPLEFT_Y(win_data->offset_y) | +		VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) | +		VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y); +	writel(val, ctx->regs + VIDOSD_A(win)); + +	last_x = win_data->offset_x + win_data->ovl_width; +	if (last_x) +		last_x--; +	last_y = win_data->offset_y + win_data->ovl_height; +	if (last_y) +		last_y--; + +	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) | +		VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y); + +	writel(val, ctx->regs + VIDOSD_B(win)); + +	DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", +			win_data->offset_x, win_data->offset_y, last_x, last_y); + +	/* hardware window 0 doesn't support alpha channel. 
*/ +	if (win != 0) { +		/* OSD alpha */ +		alpha = VIDISD14C_ALPHA1_R(0xf) | +			VIDISD14C_ALPHA1_G(0xf) | +			VIDISD14C_ALPHA1_B(0xf); + +		writel(alpha, ctx->regs + VIDOSD_C(win)); +	} + +	/* OSD size */ +	if (win != 3 && win != 4) { +		u32 offset = VIDOSD_D(win); +		if (win == 0) +			offset = VIDOSD_C(win); +		val = win_data->ovl_width * win_data->ovl_height; +		writel(val, ctx->regs + offset); + +		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); +	} + +	fimd_win_set_pixfmt(ctx, win); + +	/* hardware window 0 doesn't support color key. */ +	if (win != 0) +		fimd_win_set_colkey(ctx, win); + +	/* wincon */ +	val = readl(ctx->regs + WINCON(win)); +	val |= WINCONx_ENWIN; +	writel(val, ctx->regs + WINCON(win)); + +	/* Enable DMA channel and unprotect windows */ +	fimd_shadow_protect_win(ctx, win, false); + +	if (ctx->driver_data->has_shadowcon) { +		val = readl(ctx->regs + SHADOWCON); +		val |= SHADOWCON_CHx_ENABLE(win); +		writel(val, ctx->regs + SHADOWCON); +	} + +	win_data->enabled = true; +} + +static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct fimd_win_data *win_data; +	int win = zpos; +	u32 val; + +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	win_data = &ctx->win_data[win]; + +	if (ctx->suspended) { +		/* do not resume this window*/ +		win_data->resume = false; +		return; +	} + +	/* protect windows */ +	fimd_shadow_protect_win(ctx, win, true); + +	/* wincon */ +	val = readl(ctx->regs + WINCON(win)); +	val &= ~WINCONx_ENWIN; +	writel(val, ctx->regs + WINCON(win)); + +	/* unprotect windows */ +	if (ctx->driver_data->has_shadowcon) { +		val = readl(ctx->regs + SHADOWCON); +		val &= ~SHADOWCON_CHx_ENABLE(win); +		writel(val, ctx->regs + SHADOWCON); +	} + +	fimd_shadow_protect_win(ctx, win, false); + +	win_data->enabled = false; +} + +static void fimd_window_suspend(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = 
mgr->ctx; +	struct fimd_win_data *win_data; +	int i; + +	for (i = 0; i < WINDOWS_NR; i++) { +		win_data = &ctx->win_data[i]; +		win_data->resume = win_data->enabled; +		if (win_data->enabled) +			fimd_win_disable(mgr, i); +	} +	fimd_wait_for_vblank(mgr); +} + +static void fimd_window_resume(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct fimd_win_data *win_data; +	int i; + +	for (i = 0; i < WINDOWS_NR; i++) { +		win_data = &ctx->win_data[i]; +		win_data->enabled = win_data->resume; +		win_data->resume = false; +	} +} + +static void fimd_apply(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	struct fimd_win_data *win_data; +	int i; + +	for (i = 0; i < WINDOWS_NR; i++) { +		win_data = &ctx->win_data[i]; +		if (win_data->enabled) +			fimd_win_commit(mgr, i); +		else +			fimd_win_disable(mgr, i); +	} + +	fimd_commit(mgr); +} + +static int fimd_poweron(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; +	int ret; + +	if (!ctx->suspended) +		return 0; + +	ctx->suspended = false; + +	pm_runtime_get_sync(ctx->dev); + +	ret = clk_prepare_enable(ctx->bus_clk); +	if (ret < 0) { +		DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret); +		goto bus_clk_err; +	} + +	ret = clk_prepare_enable(ctx->lcd_clk); +	if  (ret < 0) { +		DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret); +		goto lcd_clk_err; +	} + +	/* if vblank was enabled status, enable it again. 
*/ +	if (test_and_clear_bit(0, &ctx->irq_flags)) { +		ret = fimd_enable_vblank(mgr); +		if (ret) { +			DRM_ERROR("Failed to re-enable vblank [%d]\n", ret); +			goto enable_vblank_err; +		} +	} + +	fimd_window_resume(mgr); + +	fimd_apply(mgr); + +	return 0; + +enable_vblank_err: +	clk_disable_unprepare(ctx->lcd_clk); +lcd_clk_err: +	clk_disable_unprepare(ctx->bus_clk); +bus_clk_err: +	ctx->suspended = true; +	return ret; +} + +static int fimd_poweroff(struct exynos_drm_manager *mgr) +{ +	struct fimd_context *ctx = mgr->ctx; + +	if (ctx->suspended) +		return 0; + +	/* +	 * We need to make sure that all windows are disabled before we +	 * suspend that connector. Otherwise we might try to scan from +	 * a destroyed buffer later. +	 */ +	fimd_window_suspend(mgr); + +	clk_disable_unprepare(ctx->lcd_clk); +	clk_disable_unprepare(ctx->bus_clk); + +	pm_runtime_put_sync(ctx->dev); + +	ctx->suspended = true; +	return 0; +} + +static void fimd_dpms(struct exynos_drm_manager *mgr, int mode) +{ +	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode); + +	switch (mode) { +	case DRM_MODE_DPMS_ON: +		fimd_poweron(mgr); +		break; +	case DRM_MODE_DPMS_STANDBY: +	case DRM_MODE_DPMS_SUSPEND: +	case DRM_MODE_DPMS_OFF: +		fimd_poweroff(mgr); +		break; +	default: +		DRM_DEBUG_KMS("unspecified mode %d\n", mode); +		break; +	} +} + +static struct exynos_drm_manager_ops fimd_manager_ops = { +	.dpms = fimd_dpms, +	.mode_fixup = fimd_mode_fixup, +	.mode_set = fimd_mode_set, +	.commit = fimd_commit, +	.enable_vblank = fimd_enable_vblank, +	.disable_vblank = fimd_disable_vblank, +	.wait_for_vblank = fimd_wait_for_vblank, +	.win_mode_set = fimd_win_mode_set, +	.win_commit = fimd_win_commit, +	.win_disable = fimd_win_disable, +}; + +static struct exynos_drm_manager fimd_manager = { +	.type = EXYNOS_DISPLAY_TYPE_LCD, +	.ops = &fimd_manager_ops, +}; + +static irqreturn_t fimd_irq_handler(int irq, void *dev_id) +{ +	struct fimd_context *ctx = (struct fimd_context *)dev_id; +	u32 val; + +	val = readl(ctx->regs + 
VIDINTCON1); + +	if (val & VIDINTCON1_INT_FRAME) +		/* VSYNC interrupt */ +		writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); + +	/* check the crtc is detached already from encoder */ +	if (ctx->pipe < 0 || !ctx->drm_dev) +		goto out; + +	drm_handle_vblank(ctx->drm_dev, ctx->pipe); +	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + +	/* set wait vsync event to zero and wake up queue. */ +	if (atomic_read(&ctx->wait_vsync_event)) { +		atomic_set(&ctx->wait_vsync_event, 0); +		wake_up(&ctx->wait_vsync_queue); +	} +out: +	return IRQ_HANDLED; +} + +static int fimd_bind(struct device *dev, struct device *master, void *data) +{ +	struct fimd_context *ctx = fimd_manager.ctx; +	struct drm_device *drm_dev = data; + +	fimd_mgr_initialize(&fimd_manager, drm_dev); +	exynos_drm_crtc_create(&fimd_manager); +	if (ctx->display) +		exynos_drm_create_enc_conn(drm_dev, ctx->display); + +	return 0; + +} + +static void fimd_unbind(struct device *dev, struct device *master, +			void *data) +{ +	struct exynos_drm_manager *mgr = dev_get_drvdata(dev); +	struct fimd_context *ctx = fimd_manager.ctx; +	struct drm_crtc *crtc = mgr->crtc; + +	fimd_dpms(mgr, DRM_MODE_DPMS_OFF); + +	if (ctx->display) +		exynos_dpi_remove(dev); + +	fimd_mgr_remove(mgr); + +	crtc->funcs->destroy(crtc); +} + +static const struct component_ops fimd_component_ops = { +	.bind	= fimd_bind, +	.unbind = fimd_unbind, +}; + +static int fimd_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct fimd_context *ctx; +	struct resource *res; +	int ret = -EINVAL; + +	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC, +					fimd_manager.type); +	if (ret) +		return ret; + +	if (!dev->of_node) { +		ret = -ENODEV; +		goto err_del_component; +	} + +	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) { +		ret = -ENOMEM; +		goto err_del_component; +	} + +	ctx->dev = dev; +	ctx->suspended = true; + +	if (of_property_read_bool(dev->of_node, "samsung,invert-vden")) +		
ctx->vidcon1 |= VIDCON1_INV_VDEN; +	if (of_property_read_bool(dev->of_node, "samsung,invert-vclk")) +		ctx->vidcon1 |= VIDCON1_INV_VCLK; + +	ctx->bus_clk = devm_clk_get(dev, "fimd"); +	if (IS_ERR(ctx->bus_clk)) { +		dev_err(dev, "failed to get bus clock\n"); +		ret = PTR_ERR(ctx->bus_clk); +		goto err_del_component; +	} + +	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); +	if (IS_ERR(ctx->lcd_clk)) { +		dev_err(dev, "failed to get lcd clock\n"); +		ret = PTR_ERR(ctx->lcd_clk); +		goto err_del_component; +	} + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	ctx->regs = devm_ioremap_resource(dev, res); +	if (IS_ERR(ctx->regs)) { +		ret = PTR_ERR(ctx->regs); +		goto err_del_component; +	} + +	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync"); +	if (!res) { +		dev_err(dev, "irq request failed.\n"); +		ret = -ENXIO; +		goto err_del_component; +	} + +	ret = devm_request_irq(dev, res->start, fimd_irq_handler, +							0, "drm_fimd", ctx); +	if (ret) { +		dev_err(dev, "irq request failed.\n"); +		goto err_del_component; +	} + +	ctx->driver_data = drm_fimd_get_driver_data(pdev); +	init_waitqueue_head(&ctx->wait_vsync_queue); +	atomic_set(&ctx->wait_vsync_event, 0); + +	platform_set_drvdata(pdev, &fimd_manager); + +	fimd_manager.ctx = ctx; + +	ctx->display = exynos_dpi_probe(dev); +	if (IS_ERR(ctx->display)) +		return PTR_ERR(ctx->display); + +	pm_runtime_enable(&pdev->dev); + +	ret = component_add(&pdev->dev, &fimd_component_ops); +	if (ret) +		goto err_disable_pm_runtime; + +	return ret; + +err_disable_pm_runtime: +	pm_runtime_disable(&pdev->dev); + +err_del_component: +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC); +	return ret; +} + +static int fimd_remove(struct platform_device *pdev) +{ +	pm_runtime_disable(&pdev->dev); + +	component_del(&pdev->dev, &fimd_component_ops); +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC); + +	return 0; +} + +struct platform_driver fimd_driver = { +	.probe		= fimd_probe, +	.remove		
= fimd_remove, +	.driver		= { +		.name	= "exynos4-fb", +		.owner	= THIS_MODULE, +		.of_match_table = fimd_driver_dt_match, +	}, +}; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c new file mode 100644 index 00000000000..80015871447 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -0,0 +1,1559 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundationr + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/dma-attrs.h> +#include <linux/of.h> + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "exynos_drm_drv.h" +#include "exynos_drm_g2d.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_iommu.h" + +#define G2D_HW_MAJOR_VER		4 +#define G2D_HW_MINOR_VER		1 + +/* vaild register range set from user: 0x0104 ~ 0x0880 */ +#define G2D_VALID_START			0x0104 +#define G2D_VALID_END			0x0880 + +/* general registers */ +#define G2D_SOFT_RESET			0x0000 +#define G2D_INTEN			0x0004 +#define G2D_INTC_PEND			0x000C +#define G2D_DMA_SFR_BASE_ADDR		0x0080 +#define G2D_DMA_COMMAND			0x0084 +#define G2D_DMA_STATUS			0x008C +#define G2D_DMA_HOLD_CMD		0x0090 + +/* command registers */ +#define G2D_BITBLT_START		0x0100 + +/* registers for base address */ +#define G2D_SRC_BASE_ADDR		0x0304 +#define G2D_SRC_COLOR_MODE		0x030C +#define G2D_SRC_LEFT_TOP		0x0310 +#define G2D_SRC_RIGHT_BOTTOM		0x0314 +#define G2D_SRC_PLANE2_BASE_ADDR	0x0318 +#define G2D_DST_BASE_ADDR		0x0404 +#define G2D_DST_COLOR_MODE		0x040C +#define G2D_DST_LEFT_TOP		
0x0410 +#define G2D_DST_RIGHT_BOTTOM		0x0414 +#define G2D_DST_PLANE2_BASE_ADDR	0x0418 +#define G2D_PAT_BASE_ADDR		0x0500 +#define G2D_MSK_BASE_ADDR		0x0520 + +/* G2D_SOFT_RESET */ +#define G2D_SFRCLEAR			(1 << 1) +#define G2D_R				(1 << 0) + +/* G2D_INTEN */ +#define G2D_INTEN_ACF			(1 << 3) +#define G2D_INTEN_UCF			(1 << 2) +#define G2D_INTEN_GCF			(1 << 1) +#define G2D_INTEN_SCF			(1 << 0) + +/* G2D_INTC_PEND */ +#define G2D_INTP_ACMD_FIN		(1 << 3) +#define G2D_INTP_UCMD_FIN		(1 << 2) +#define G2D_INTP_GCMD_FIN		(1 << 1) +#define G2D_INTP_SCMD_FIN		(1 << 0) + +/* G2D_DMA_COMMAND */ +#define G2D_DMA_HALT			(1 << 2) +#define G2D_DMA_CONTINUE		(1 << 1) +#define G2D_DMA_START			(1 << 0) + +/* G2D_DMA_STATUS */ +#define G2D_DMA_LIST_DONE_COUNT		(0xFF << 17) +#define G2D_DMA_BITBLT_DONE_COUNT	(0xFFFF << 1) +#define G2D_DMA_DONE			(1 << 0) +#define G2D_DMA_LIST_DONE_COUNT_OFFSET	17 + +/* G2D_DMA_HOLD_CMD */ +#define G2D_USER_HOLD			(1 << 2) +#define G2D_LIST_HOLD			(1 << 1) +#define G2D_BITBLT_HOLD			(1 << 0) + +/* G2D_BITBLT_START */ +#define G2D_START_CASESEL		(1 << 2) +#define G2D_START_NHOLT			(1 << 1) +#define G2D_START_BITBLT		(1 << 0) + +/* buffer color format */ +#define G2D_FMT_XRGB8888		0 +#define G2D_FMT_ARGB8888		1 +#define G2D_FMT_RGB565			2 +#define G2D_FMT_XRGB1555		3 +#define G2D_FMT_ARGB1555		4 +#define G2D_FMT_XRGB4444		5 +#define G2D_FMT_ARGB4444		6 +#define G2D_FMT_PACKED_RGB888		7 +#define G2D_FMT_A8			11 +#define G2D_FMT_L8			12 + +/* buffer valid length */ +#define G2D_LEN_MIN			1 +#define G2D_LEN_MAX			8000 + +#define G2D_CMDLIST_SIZE		(PAGE_SIZE / 4) +#define G2D_CMDLIST_NUM			64 +#define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) +#define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2) + +/* maximum buffer pool size of userptr is 64MB as default */ +#define MAX_POOL		(64 * 1024 * 1024) + +enum { +	BUF_TYPE_GEM = 1, +	BUF_TYPE_USERPTR, +}; + +enum g2d_reg_type { +	REG_TYPE_NONE = -1, +	REG_TYPE_SRC, +	
REG_TYPE_SRC_PLANE2, +	REG_TYPE_DST, +	REG_TYPE_DST_PLANE2, +	REG_TYPE_PAT, +	REG_TYPE_MSK, +	MAX_REG_TYPE_NR +}; + +/* cmdlist data structure */ +struct g2d_cmdlist { +	u32		head; +	unsigned long	data[G2D_CMDLIST_DATA_NUM]; +	u32		last;	/* last data offset */ +}; + +/* + * A structure of buffer description + * + * @format: color format + * @left_x: the x coordinates of left top corner + * @top_y: the y coordinates of left top corner + * @right_x: the x coordinates of right bottom corner + * @bottom_y: the y coordinates of right bottom corner + * + */ +struct g2d_buf_desc { +	unsigned int	format; +	unsigned int	left_x; +	unsigned int	top_y; +	unsigned int	right_x; +	unsigned int	bottom_y; +}; + +/* + * A structure of buffer information + * + * @map_nr: manages the number of mapped buffers + * @reg_types: stores regitster type in the order of requested command + * @handles: stores buffer handle in its reg_type position + * @types: stores buffer type in its reg_type position + * @descs: stores buffer description in its reg_type position + * + */ +struct g2d_buf_info { +	unsigned int		map_nr; +	enum g2d_reg_type	reg_types[MAX_REG_TYPE_NR]; +	unsigned long		handles[MAX_REG_TYPE_NR]; +	unsigned int		types[MAX_REG_TYPE_NR]; +	struct g2d_buf_desc	descs[MAX_REG_TYPE_NR]; +}; + +struct drm_exynos_pending_g2d_event { +	struct drm_pending_event	base; +	struct drm_exynos_g2d_event	event; +}; + +struct g2d_cmdlist_userptr { +	struct list_head	list; +	dma_addr_t		dma_addr; +	unsigned long		userptr; +	unsigned long		size; +	struct page		**pages; +	unsigned int		npages; +	struct sg_table		*sgt; +	struct vm_area_struct	*vma; +	atomic_t		refcount; +	bool			in_pool; +	bool			out_of_list; +}; +struct g2d_cmdlist_node { +	struct list_head	list; +	struct g2d_cmdlist	*cmdlist; +	dma_addr_t		dma_addr; +	struct g2d_buf_info	buf_info; + +	struct drm_exynos_pending_g2d_event	*event; +}; + +struct g2d_runqueue_node { +	struct list_head	list; +	struct list_head	run_cmdlist; +	struct list_head	
event_list; +	struct drm_file		*filp; +	pid_t			pid; +	struct completion	complete; +	int			async; +}; + +struct g2d_data { +	struct device			*dev; +	struct clk			*gate_clk; +	void __iomem			*regs; +	int				irq; +	struct workqueue_struct		*g2d_workq; +	struct work_struct		runqueue_work; +	struct exynos_drm_subdrv	subdrv; +	bool				suspended; + +	/* cmdlist */ +	struct g2d_cmdlist_node		*cmdlist_node; +	struct list_head		free_cmdlist; +	struct mutex			cmdlist_mutex; +	dma_addr_t			cmdlist_pool; +	void				*cmdlist_pool_virt; +	struct dma_attrs		cmdlist_dma_attrs; + +	/* runqueue*/ +	struct g2d_runqueue_node	*runqueue_node; +	struct list_head		runqueue; +	struct mutex			runqueue_mutex; +	struct kmem_cache		*runqueue_slab; + +	unsigned long			current_pool; +	unsigned long			max_pool; +}; + +static int g2d_init_cmdlist(struct g2d_data *g2d) +{ +	struct device *dev = g2d->dev; +	struct g2d_cmdlist_node *node = g2d->cmdlist_node; +	struct exynos_drm_subdrv *subdrv = &g2d->subdrv; +	int nr; +	int ret; +	struct g2d_buf_info *buf_info; + +	init_dma_attrs(&g2d->cmdlist_dma_attrs); +	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); + +	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev, +						G2D_CMDLIST_POOL_SIZE, +						&g2d->cmdlist_pool, GFP_KERNEL, +						&g2d->cmdlist_dma_attrs); +	if (!g2d->cmdlist_pool_virt) { +		dev_err(dev, "failed to allocate dma memory\n"); +		return -ENOMEM; +	} + +	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); +	if (!node) { +		dev_err(dev, "failed to allocate memory\n"); +		ret = -ENOMEM; +		goto err; +	} + +	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { +		unsigned int i; + +		node[nr].cmdlist = +			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; +		node[nr].dma_addr = +			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; + +		buf_info = &node[nr].buf_info; +		for (i = 0; i < MAX_REG_TYPE_NR; i++) +			buf_info->reg_types[i] = REG_TYPE_NONE; + +		list_add_tail(&node[nr].list, &g2d->free_cmdlist); +	} + +	return 0; + 
+err: +	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, +			g2d->cmdlist_pool_virt, +			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); +	return ret; +} + +static void g2d_fini_cmdlist(struct g2d_data *g2d) +{ +	struct exynos_drm_subdrv *subdrv = &g2d->subdrv; + +	kfree(g2d->cmdlist_node); +	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, +			g2d->cmdlist_pool_virt, +			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); +} + +static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) +{ +	struct device *dev = g2d->dev; +	struct g2d_cmdlist_node *node; + +	mutex_lock(&g2d->cmdlist_mutex); +	if (list_empty(&g2d->free_cmdlist)) { +		dev_err(dev, "there is no free cmdlist\n"); +		mutex_unlock(&g2d->cmdlist_mutex); +		return NULL; +	} + +	node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node, +				list); +	list_del_init(&node->list); +	mutex_unlock(&g2d->cmdlist_mutex); + +	return node; +} + +static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node) +{ +	mutex_lock(&g2d->cmdlist_mutex); +	list_move_tail(&node->list, &g2d->free_cmdlist); +	mutex_unlock(&g2d->cmdlist_mutex); +} + +static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv, +				     struct g2d_cmdlist_node *node) +{ +	struct g2d_cmdlist_node *lnode; + +	if (list_empty(&g2d_priv->inuse_cmdlist)) +		goto add_to_list; + +	/* this links to base address of new cmdlist */ +	lnode = list_entry(g2d_priv->inuse_cmdlist.prev, +				struct g2d_cmdlist_node, list); +	lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr; + +add_to_list: +	list_add_tail(&node->list, &g2d_priv->inuse_cmdlist); + +	if (node->event) +		list_add_tail(&node->event->base.link, &g2d_priv->event_list); +} + +static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev, +					unsigned long obj, +					bool force) +{ +	struct g2d_cmdlist_userptr *g2d_userptr = +					(struct g2d_cmdlist_userptr *)obj; + +	if (!obj) +		return; + +	if (force) +		goto out; + +	
atomic_dec(&g2d_userptr->refcount); + +	if (atomic_read(&g2d_userptr->refcount) > 0) +		return; + +	if (g2d_userptr->in_pool) +		return; + +out: +	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt, +					DMA_BIDIRECTIONAL); + +	exynos_gem_put_pages_to_userptr(g2d_userptr->pages, +					g2d_userptr->npages, +					g2d_userptr->vma); + +	exynos_gem_put_vma(g2d_userptr->vma); + +	if (!g2d_userptr->out_of_list) +		list_del_init(&g2d_userptr->list); + +	sg_free_table(g2d_userptr->sgt); +	kfree(g2d_userptr->sgt); + +	drm_free_large(g2d_userptr->pages); +	kfree(g2d_userptr); +} + +static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, +					unsigned long userptr, +					unsigned long size, +					struct drm_file *filp, +					unsigned long *obj) +{ +	struct drm_exynos_file_private *file_priv = filp->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; +	struct g2d_cmdlist_userptr *g2d_userptr; +	struct g2d_data *g2d; +	struct page **pages; +	struct sg_table	*sgt; +	struct vm_area_struct *vma; +	unsigned long start, end; +	unsigned int npages, offset; +	int ret; + +	if (!size) { +		DRM_ERROR("invalid userptr size.\n"); +		return ERR_PTR(-EINVAL); +	} + +	g2d = dev_get_drvdata(g2d_priv->dev); + +	/* check if userptr already exists in userptr_list. */ +	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) { +		if (g2d_userptr->userptr == userptr) { +			/* +			 * also check size because there could be same address +			 * and different size. +			 */ +			if (g2d_userptr->size == size) { +				atomic_inc(&g2d_userptr->refcount); +				*obj = (unsigned long)g2d_userptr; + +				return &g2d_userptr->dma_addr; +			} + +			/* +			 * at this moment, maybe g2d dma is accessing this +			 * g2d_userptr memory region so just remove this +			 * g2d_userptr object from userptr_list not to be +			 * referred again and also except it the userptr +			 * pool to be released after the dma access completion. 
+			 */ +			g2d_userptr->out_of_list = true; +			g2d_userptr->in_pool = false; +			list_del_init(&g2d_userptr->list); + +			break; +		} +	} + +	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL); +	if (!g2d_userptr) +		return ERR_PTR(-ENOMEM); + +	atomic_set(&g2d_userptr->refcount, 1); + +	start = userptr & PAGE_MASK; +	offset = userptr & ~PAGE_MASK; +	end = PAGE_ALIGN(userptr + size); +	npages = (end - start) >> PAGE_SHIFT; +	g2d_userptr->npages = npages; + +	pages = drm_calloc_large(npages, sizeof(struct page *)); +	if (!pages) { +		DRM_ERROR("failed to allocate pages.\n"); +		ret = -ENOMEM; +		goto err_free; +	} + +	down_read(¤t->mm->mmap_sem); +	vma = find_vma(current->mm, userptr); +	if (!vma) { +		up_read(¤t->mm->mmap_sem); +		DRM_ERROR("failed to get vm region.\n"); +		ret = -EFAULT; +		goto err_free_pages; +	} + +	if (vma->vm_end < userptr + size) { +		up_read(¤t->mm->mmap_sem); +		DRM_ERROR("vma is too small.\n"); +		ret = -EFAULT; +		goto err_free_pages; +	} + +	g2d_userptr->vma = exynos_gem_get_vma(vma); +	if (!g2d_userptr->vma) { +		up_read(¤t->mm->mmap_sem); +		DRM_ERROR("failed to copy vma.\n"); +		ret = -ENOMEM; +		goto err_free_pages; +	} + +	g2d_userptr->size = size; + +	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK, +						npages, pages, vma); +	if (ret < 0) { +		up_read(¤t->mm->mmap_sem); +		DRM_ERROR("failed to get user pages from userptr.\n"); +		goto err_put_vma; +	} + +	up_read(¤t->mm->mmap_sem); +	g2d_userptr->pages = pages; + +	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); +	if (!sgt) { +		ret = -ENOMEM; +		goto err_free_userptr; +	} + +	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, +					size, GFP_KERNEL); +	if (ret < 0) { +		DRM_ERROR("failed to get sgt from pages.\n"); +		goto err_free_sgt; +	} + +	g2d_userptr->sgt = sgt; + +	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt, +						DMA_BIDIRECTIONAL); +	if (ret < 0) { +		DRM_ERROR("failed to map sgt with dma region.\n"); +		goto err_sg_free_table; +	} 
+ +	g2d_userptr->dma_addr = sgt->sgl[0].dma_address; +	g2d_userptr->userptr = userptr; + +	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list); + +	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { +		g2d->current_pool += npages << PAGE_SHIFT; +		g2d_userptr->in_pool = true; +	} + +	*obj = (unsigned long)g2d_userptr; + +	return &g2d_userptr->dma_addr; + +err_sg_free_table: +	sg_free_table(sgt); + +err_free_sgt: +	kfree(sgt); + +err_free_userptr: +	exynos_gem_put_pages_to_userptr(g2d_userptr->pages, +					g2d_userptr->npages, +					g2d_userptr->vma); + +err_put_vma: +	exynos_gem_put_vma(g2d_userptr->vma); + +err_free_pages: +	drm_free_large(pages); + +err_free: +	kfree(g2d_userptr); + +	return ERR_PTR(ret); +} + +static void g2d_userptr_free_all(struct drm_device *drm_dev, +					struct g2d_data *g2d, +					struct drm_file *filp) +{ +	struct drm_exynos_file_private *file_priv = filp->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; +	struct g2d_cmdlist_userptr *g2d_userptr, *n; + +	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list) +		if (g2d_userptr->in_pool) +			g2d_userptr_put_dma_addr(drm_dev, +						(unsigned long)g2d_userptr, +						true); + +	g2d->current_pool = 0; +} + +static enum g2d_reg_type g2d_get_reg_type(int reg_offset) +{ +	enum g2d_reg_type reg_type; + +	switch (reg_offset) { +	case G2D_SRC_BASE_ADDR: +	case G2D_SRC_COLOR_MODE: +	case G2D_SRC_LEFT_TOP: +	case G2D_SRC_RIGHT_BOTTOM: +		reg_type = REG_TYPE_SRC; +		break; +	case G2D_SRC_PLANE2_BASE_ADDR: +		reg_type = REG_TYPE_SRC_PLANE2; +		break; +	case G2D_DST_BASE_ADDR: +	case G2D_DST_COLOR_MODE: +	case G2D_DST_LEFT_TOP: +	case G2D_DST_RIGHT_BOTTOM: +		reg_type = REG_TYPE_DST; +		break; +	case G2D_DST_PLANE2_BASE_ADDR: +		reg_type = REG_TYPE_DST_PLANE2; +		break; +	case G2D_PAT_BASE_ADDR: +		reg_type = REG_TYPE_PAT; +		break; +	case G2D_MSK_BASE_ADDR: +		reg_type = REG_TYPE_MSK; +		break; +	default: +		reg_type = REG_TYPE_NONE; 
+		DRM_ERROR("Unknown register offset![%d]\n", reg_offset); +		break; +	} + +	return reg_type; +} + +static unsigned long g2d_get_buf_bpp(unsigned int format) +{ +	unsigned long bpp; + +	switch (format) { +	case G2D_FMT_XRGB8888: +	case G2D_FMT_ARGB8888: +		bpp = 4; +		break; +	case G2D_FMT_RGB565: +	case G2D_FMT_XRGB1555: +	case G2D_FMT_ARGB1555: +	case G2D_FMT_XRGB4444: +	case G2D_FMT_ARGB4444: +		bpp = 2; +		break; +	case G2D_FMT_PACKED_RGB888: +		bpp = 3; +		break; +	default: +		bpp = 1; +		break; +	} + +	return bpp; +} + +static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc, +						enum g2d_reg_type reg_type, +						unsigned long size) +{ +	unsigned int width, height; +	unsigned long area; + +	/* +	 * check source and destination buffers only. +	 * so the others are always valid. +	 */ +	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST) +		return true; + +	width = buf_desc->right_x - buf_desc->left_x; +	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) { +		DRM_ERROR("width[%u] is out of range!\n", width); +		return false; +	} + +	height = buf_desc->bottom_y - buf_desc->top_y; +	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) { +		DRM_ERROR("height[%u] is out of range!\n", height); +		return false; +	} + +	area = (unsigned long)width * (unsigned long)height * +					g2d_get_buf_bpp(buf_desc->format); +	if (area > size) { +		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size); +		return false; +	} + +	return true; +} + +static int g2d_map_cmdlist_gem(struct g2d_data *g2d, +				struct g2d_cmdlist_node *node, +				struct drm_device *drm_dev, +				struct drm_file *file) +{ +	struct g2d_cmdlist *cmdlist = node->cmdlist; +	struct g2d_buf_info *buf_info = &node->buf_info; +	int offset; +	int ret; +	int i; + +	for (i = 0; i < buf_info->map_nr; i++) { +		struct g2d_buf_desc *buf_desc; +		enum g2d_reg_type reg_type; +		int reg_pos; +		unsigned long handle; +		dma_addr_t *addr; + +		reg_pos = cmdlist->last - 2 * (i + 1); + +		offset = 
cmdlist->data[reg_pos]; +		handle = cmdlist->data[reg_pos + 1]; + +		reg_type = g2d_get_reg_type(offset); +		if (reg_type == REG_TYPE_NONE) { +			ret = -EFAULT; +			goto err; +		} + +		buf_desc = &buf_info->descs[reg_type]; + +		if (buf_info->types[reg_type] == BUF_TYPE_GEM) { +			unsigned long size; + +			size = exynos_drm_gem_get_size(drm_dev, handle, file); +			if (!size) { +				ret = -EFAULT; +				goto err; +			} + +			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, +									size)) { +				ret = -EFAULT; +				goto err; +			} + +			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, +								file); +			if (IS_ERR(addr)) { +				ret = -EFAULT; +				goto err; +			} +		} else { +			struct drm_exynos_g2d_userptr g2d_userptr; + +			if (copy_from_user(&g2d_userptr, (void __user *)handle, +				sizeof(struct drm_exynos_g2d_userptr))) { +				ret = -EFAULT; +				goto err; +			} + +			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, +							g2d_userptr.size)) { +				ret = -EFAULT; +				goto err; +			} + +			addr = g2d_userptr_get_dma_addr(drm_dev, +							g2d_userptr.userptr, +							g2d_userptr.size, +							file, +							&handle); +			if (IS_ERR(addr)) { +				ret = -EFAULT; +				goto err; +			} +		} + +		cmdlist->data[reg_pos + 1] = *addr; +		buf_info->reg_types[i] = reg_type; +		buf_info->handles[reg_type] = handle; +	} + +	return 0; + +err: +	buf_info->map_nr = i; +	return ret; +} + +static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, +				  struct g2d_cmdlist_node *node, +				  struct drm_file *filp) +{ +	struct exynos_drm_subdrv *subdrv = &g2d->subdrv; +	struct g2d_buf_info *buf_info = &node->buf_info; +	int i; + +	for (i = 0; i < buf_info->map_nr; i++) { +		struct g2d_buf_desc *buf_desc; +		enum g2d_reg_type reg_type; +		unsigned long handle; + +		reg_type = buf_info->reg_types[i]; + +		buf_desc = &buf_info->descs[reg_type]; +		handle = buf_info->handles[reg_type]; + +		if (buf_info->types[reg_type] == BUF_TYPE_GEM) +			
exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, +							filp); +		else +			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, +							false); + +		buf_info->reg_types[i] = REG_TYPE_NONE; +		buf_info->handles[reg_type] = 0; +		buf_info->types[reg_type] = 0; +		memset(buf_desc, 0x00, sizeof(*buf_desc)); +	} + +	buf_info->map_nr = 0; +} + +static void g2d_dma_start(struct g2d_data *g2d, +			  struct g2d_runqueue_node *runqueue_node) +{ +	struct g2d_cmdlist_node *node = +				list_first_entry(&runqueue_node->run_cmdlist, +						struct g2d_cmdlist_node, list); +	int ret; + +	ret = pm_runtime_get_sync(g2d->dev); +	if (ret < 0) +		return; + +	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); +	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); +} + +static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d) +{ +	struct g2d_runqueue_node *runqueue_node; + +	if (list_empty(&g2d->runqueue)) +		return NULL; + +	runqueue_node = list_first_entry(&g2d->runqueue, +					 struct g2d_runqueue_node, list); +	list_del_init(&runqueue_node->list); +	return runqueue_node; +} + +static void g2d_free_runqueue_node(struct g2d_data *g2d, +				   struct g2d_runqueue_node *runqueue_node) +{ +	struct g2d_cmdlist_node *node; + +	if (!runqueue_node) +		return; + +	mutex_lock(&g2d->cmdlist_mutex); +	/* +	 * commands in run_cmdlist have been completed so unmap all gem +	 * objects in each command node so that they are unreferenced. 
+	 */ +	list_for_each_entry(node, &runqueue_node->run_cmdlist, list) +		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp); +	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); +	mutex_unlock(&g2d->cmdlist_mutex); + +	kmem_cache_free(g2d->runqueue_slab, runqueue_node); +} + +static void g2d_exec_runqueue(struct g2d_data *g2d) +{ +	g2d->runqueue_node = g2d_get_runqueue_node(g2d); +	if (g2d->runqueue_node) +		g2d_dma_start(g2d, g2d->runqueue_node); +} + +static void g2d_runqueue_worker(struct work_struct *work) +{ +	struct g2d_data *g2d = container_of(work, struct g2d_data, +					    runqueue_work); + +	mutex_lock(&g2d->runqueue_mutex); +	pm_runtime_put_sync(g2d->dev); + +	complete(&g2d->runqueue_node->complete); +	if (g2d->runqueue_node->async) +		g2d_free_runqueue_node(g2d, g2d->runqueue_node); + +	if (g2d->suspended) +		g2d->runqueue_node = NULL; +	else +		g2d_exec_runqueue(g2d); +	mutex_unlock(&g2d->runqueue_mutex); +} + +static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) +{ +	struct drm_device *drm_dev = g2d->subdrv.drm_dev; +	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; +	struct drm_exynos_pending_g2d_event *e; +	struct timeval now; +	unsigned long flags; + +	if (list_empty(&runqueue_node->event_list)) +		return; + +	e = list_first_entry(&runqueue_node->event_list, +			     struct drm_exynos_pending_g2d_event, base.link); + +	do_gettimeofday(&now); +	e->event.tv_sec = now.tv_sec; +	e->event.tv_usec = now.tv_usec; +	e->event.cmdlist_no = cmdlist_no; + +	spin_lock_irqsave(&drm_dev->event_lock, flags); +	list_move_tail(&e->base.link, &e->base.file_priv->event_list); +	wake_up_interruptible(&e->base.file_priv->event_wait); +	spin_unlock_irqrestore(&drm_dev->event_lock, flags); +} + +static irqreturn_t g2d_irq_handler(int irq, void *dev_id) +{ +	struct g2d_data *g2d = dev_id; +	u32 pending; + +	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND); +	if (pending) +		writel_relaxed(pending, g2d->regs + 
G2D_INTC_PEND); + +	if (pending & G2D_INTP_GCMD_FIN) { +		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS); + +		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >> +						G2D_DMA_LIST_DONE_COUNT_OFFSET; + +		g2d_finish_event(g2d, cmdlist_no); + +		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD); +		if (!(pending & G2D_INTP_ACMD_FIN)) { +			writel_relaxed(G2D_DMA_CONTINUE, +					g2d->regs + G2D_DMA_COMMAND); +		} +	} + +	if (pending & G2D_INTP_ACMD_FIN) +		queue_work(g2d->g2d_workq, &g2d->runqueue_work); + +	return IRQ_HANDLED; +} + +static int g2d_check_reg_offset(struct device *dev, +				struct g2d_cmdlist_node *node, +				int nr, bool for_addr) +{ +	struct g2d_cmdlist *cmdlist = node->cmdlist; +	int reg_offset; +	int index; +	int i; + +	for (i = 0; i < nr; i++) { +		struct g2d_buf_info *buf_info = &node->buf_info; +		struct g2d_buf_desc *buf_desc; +		enum g2d_reg_type reg_type; +		unsigned long value; + +		index = cmdlist->last - 2 * (i + 1); + +		reg_offset = cmdlist->data[index] & ~0xfffff000; +		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) +			goto err; +		if (reg_offset % 4) +			goto err; + +		switch (reg_offset) { +		case G2D_SRC_BASE_ADDR: +		case G2D_SRC_PLANE2_BASE_ADDR: +		case G2D_DST_BASE_ADDR: +		case G2D_DST_PLANE2_BASE_ADDR: +		case G2D_PAT_BASE_ADDR: +		case G2D_MSK_BASE_ADDR: +			if (!for_addr) +				goto err; + +			reg_type = g2d_get_reg_type(reg_offset); +			if (reg_type == REG_TYPE_NONE) +				goto err; + +			/* check userptr buffer type. 
*/ +			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) { +				buf_info->types[reg_type] = BUF_TYPE_USERPTR; +				cmdlist->data[index] &= ~G2D_BUF_USERPTR; +			} else +				buf_info->types[reg_type] = BUF_TYPE_GEM; +			break; +		case G2D_SRC_COLOR_MODE: +		case G2D_DST_COLOR_MODE: +			if (for_addr) +				goto err; + +			reg_type = g2d_get_reg_type(reg_offset); +			if (reg_type == REG_TYPE_NONE) +				goto err; + +			buf_desc = &buf_info->descs[reg_type]; +			value = cmdlist->data[index + 1]; + +			buf_desc->format = value & 0xf; +			break; +		case G2D_SRC_LEFT_TOP: +		case G2D_DST_LEFT_TOP: +			if (for_addr) +				goto err; + +			reg_type = g2d_get_reg_type(reg_offset); +			if (reg_type == REG_TYPE_NONE) +				goto err; + +			buf_desc = &buf_info->descs[reg_type]; +			value = cmdlist->data[index + 1]; + +			buf_desc->left_x = value & 0x1fff; +			buf_desc->top_y = (value & 0x1fff0000) >> 16; +			break; +		case G2D_SRC_RIGHT_BOTTOM: +		case G2D_DST_RIGHT_BOTTOM: +			if (for_addr) +				goto err; + +			reg_type = g2d_get_reg_type(reg_offset); +			if (reg_type == REG_TYPE_NONE) +				goto err; + +			buf_desc = &buf_info->descs[reg_type]; +			value = cmdlist->data[index + 1]; + +			buf_desc->right_x = value & 0x1fff; +			buf_desc->bottom_y = (value & 0x1fff0000) >> 16; +			break; +		default: +			if (for_addr) +				goto err; +			break; +		} +	} + +	return 0; + +err: +	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]); +	return -EINVAL; +} + +/* ioctl functions */ +int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, +			     struct drm_file *file) +{ +	struct drm_exynos_g2d_get_ver *ver = data; + +	ver->major = G2D_HW_MAJOR_VER; +	ver->minor = G2D_HW_MINOR_VER; + +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); + +int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, +				 struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv = 
file_priv->g2d_priv; +	struct device *dev = g2d_priv->dev; +	struct g2d_data *g2d; +	struct drm_exynos_g2d_set_cmdlist *req = data; +	struct drm_exynos_g2d_cmd *cmd; +	struct drm_exynos_pending_g2d_event *e; +	struct g2d_cmdlist_node *node; +	struct g2d_cmdlist *cmdlist; +	unsigned long flags; +	int size; +	int ret; + +	if (!dev) +		return -ENODEV; + +	g2d = dev_get_drvdata(dev); +	if (!g2d) +		return -EFAULT; + +	node = g2d_get_cmdlist(g2d); +	if (!node) +		return -ENOMEM; + +	node->event = NULL; + +	if (req->event_type != G2D_EVENT_NOT) { +		spin_lock_irqsave(&drm_dev->event_lock, flags); +		if (file->event_space < sizeof(e->event)) { +			spin_unlock_irqrestore(&drm_dev->event_lock, flags); +			ret = -ENOMEM; +			goto err; +		} +		file->event_space -= sizeof(e->event); +		spin_unlock_irqrestore(&drm_dev->event_lock, flags); + +		e = kzalloc(sizeof(*node->event), GFP_KERNEL); +		if (!e) { +			spin_lock_irqsave(&drm_dev->event_lock, flags); +			file->event_space += sizeof(e->event); +			spin_unlock_irqrestore(&drm_dev->event_lock, flags); + +			ret = -ENOMEM; +			goto err; +		} + +		e->event.base.type = DRM_EXYNOS_G2D_EVENT; +		e->event.base.length = sizeof(e->event); +		e->event.user_data = req->user_data; +		e->base.event = &e->event.base; +		e->base.file_priv = file; +		e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; + +		node->event = e; +	} + +	cmdlist = node->cmdlist; + +	cmdlist->last = 0; + +	/* +	 * If don't clear SFR registers, the cmdlist is affected by register +	 * values of previous cmdlist. G2D hw executes SFR clear command and +	 * a next command at the same time then the next command is ignored and +	 * is executed rightly from next next command, so needs a dummy command +	 * to next command of SFR clear command. 
+	 */ +	cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET; +	cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR; +	cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; +	cmdlist->data[cmdlist->last++] = 0; + +	/* +	 * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG +	 * and GCF bit should be set to INTEN register if user wants +	 * G2D interrupt event once current command list execution is +	 * finished. +	 * Otherwise only ACF bit should be set to INTEN register so +	 * that one interrupt is occurred after all command lists +	 * have been completed. +	 */ +	if (node->event) { +		cmdlist->data[cmdlist->last++] = G2D_INTEN; +		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF; +		cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; +		cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; +	} else { +		cmdlist->data[cmdlist->last++] = G2D_INTEN; +		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; +	} + +	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ +	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2; +	if (size > G2D_CMDLIST_DATA_NUM) { +		dev_err(dev, "cmdlist size is too big\n"); +		ret = -EINVAL; +		goto err_free_event; +	} + +	cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; + +	if (copy_from_user(cmdlist->data + cmdlist->last, +				(void __user *)cmd, +				sizeof(*cmd) * req->cmd_nr)) { +		ret = -EFAULT; +		goto err_free_event; +	} +	cmdlist->last += req->cmd_nr * 2; + +	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false); +	if (ret < 0) +		goto err_free_event; + +	node->buf_info.map_nr = req->cmd_buf_nr; +	if (req->cmd_buf_nr) { +		struct drm_exynos_g2d_cmd *cmd_buf; + +		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; + +		if (copy_from_user(cmdlist->data + cmdlist->last, +					(void __user *)cmd_buf, +					sizeof(*cmd_buf) * req->cmd_buf_nr)) { +			ret = -EFAULT; +			goto err_free_event; +		} +		cmdlist->last += req->cmd_buf_nr * 2; + +		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, 
true); +		if (ret < 0) +			goto err_free_event; + +		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file); +		if (ret < 0) +			goto err_unmap; +	} + +	cmdlist->data[cmdlist->last++] = G2D_BITBLT_START; +	cmdlist->data[cmdlist->last++] = G2D_START_BITBLT; + +	/* head */ +	cmdlist->head = cmdlist->last / 2; + +	/* tail */ +	cmdlist->data[cmdlist->last] = 0; + +	g2d_add_cmdlist_to_inuse(g2d_priv, node); + +	return 0; + +err_unmap: +	g2d_unmap_cmdlist_gem(g2d, node, file); +err_free_event: +	if (node->event) { +		spin_lock_irqsave(&drm_dev->event_lock, flags); +		file->event_space += sizeof(e->event); +		spin_unlock_irqrestore(&drm_dev->event_lock, flags); +		kfree(node->event); +	} +err: +	g2d_put_cmdlist(g2d, node); +	return ret; +} +EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); + +int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, +			  struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; +	struct device *dev = g2d_priv->dev; +	struct g2d_data *g2d; +	struct drm_exynos_g2d_exec *req = data; +	struct g2d_runqueue_node *runqueue_node; +	struct list_head *run_cmdlist; +	struct list_head *event_list; + +	if (!dev) +		return -ENODEV; + +	g2d = dev_get_drvdata(dev); +	if (!g2d) +		return -EFAULT; + +	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); +	if (!runqueue_node) { +		dev_err(dev, "failed to allocate memory\n"); +		return -ENOMEM; +	} +	run_cmdlist = &runqueue_node->run_cmdlist; +	event_list = &runqueue_node->event_list; +	INIT_LIST_HEAD(run_cmdlist); +	INIT_LIST_HEAD(event_list); +	init_completion(&runqueue_node->complete); +	runqueue_node->async = req->async; + +	list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist); +	list_splice_init(&g2d_priv->event_list, event_list); + +	if (list_empty(run_cmdlist)) { +		dev_err(dev, "there is no inuse cmdlist\n"); +		kmem_cache_free(g2d->runqueue_slab, runqueue_node); +		return -EPERM; +	
} + +	mutex_lock(&g2d->runqueue_mutex); +	runqueue_node->pid = current->pid; +	runqueue_node->filp = file; +	list_add_tail(&runqueue_node->list, &g2d->runqueue); +	if (!g2d->runqueue_node) +		g2d_exec_runqueue(g2d); +	mutex_unlock(&g2d->runqueue_mutex); + +	if (runqueue_node->async) +		goto out; + +	wait_for_completion(&runqueue_node->complete); +	g2d_free_runqueue_node(g2d, runqueue_node); + +out: +	return 0; +} +EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); + +static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) +{ +	struct g2d_data *g2d; +	int ret; + +	g2d = dev_get_drvdata(dev); +	if (!g2d) +		return -EFAULT; + +	/* allocate dma-aware cmdlist buffer. */ +	ret = g2d_init_cmdlist(g2d); +	if (ret < 0) { +		dev_err(dev, "cmdlist init failed\n"); +		return ret; +	} + +	if (!is_drm_iommu_supported(drm_dev)) +		return 0; + +	ret = drm_iommu_attach_device(drm_dev, dev); +	if (ret < 0) { +		dev_err(dev, "failed to enable iommu.\n"); +		g2d_fini_cmdlist(g2d); +	} + +	return ret; + +} + +static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev) +{ +	if (!is_drm_iommu_supported(drm_dev)) +		return; + +	drm_iommu_detach_device(drm_dev, dev); +} + +static int g2d_open(struct drm_device *drm_dev, struct device *dev, +			struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv; + +	g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL); +	if (!g2d_priv) +		return -ENOMEM; + +	g2d_priv->dev = dev; +	file_priv->g2d_priv = g2d_priv; + +	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); +	INIT_LIST_HEAD(&g2d_priv->event_list); +	INIT_LIST_HEAD(&g2d_priv->userptr_list); + +	return 0; +} + +static void g2d_close(struct drm_device *drm_dev, struct device *dev, +			struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; +	struct g2d_data *g2d; +	struct g2d_cmdlist_node *node, *n; + +	if 
(!dev) +		return; + +	g2d = dev_get_drvdata(dev); +	if (!g2d) +		return; + +	mutex_lock(&g2d->cmdlist_mutex); +	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { +		/* +		 * unmap all gem objects not completed. +		 * +		 * P.S. if current process was terminated forcely then +		 * there may be some commands in inuse_cmdlist so unmap +		 * them. +		 */ +		g2d_unmap_cmdlist_gem(g2d, node, file); +		list_move_tail(&node->list, &g2d->free_cmdlist); +	} +	mutex_unlock(&g2d->cmdlist_mutex); + +	/* release all g2d_userptr in pool. */ +	g2d_userptr_free_all(drm_dev, g2d, file); + +	kfree(file_priv->g2d_priv); +} + +static int g2d_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct resource *res; +	struct g2d_data *g2d; +	struct exynos_drm_subdrv *subdrv; +	int ret; + +	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL); +	if (!g2d) +		return -ENOMEM; + +	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", +			sizeof(struct g2d_runqueue_node), 0, 0, NULL); +	if (!g2d->runqueue_slab) +		return -ENOMEM; + +	g2d->dev = dev; + +	g2d->g2d_workq = create_singlethread_workqueue("g2d"); +	if (!g2d->g2d_workq) { +		dev_err(dev, "failed to create workqueue\n"); +		ret = -EINVAL; +		goto err_destroy_slab; +	} + +	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker); +	INIT_LIST_HEAD(&g2d->free_cmdlist); +	INIT_LIST_HEAD(&g2d->runqueue); + +	mutex_init(&g2d->cmdlist_mutex); +	mutex_init(&g2d->runqueue_mutex); + +	g2d->gate_clk = devm_clk_get(dev, "fimg2d"); +	if (IS_ERR(g2d->gate_clk)) { +		dev_err(dev, "failed to get gate clock\n"); +		ret = PTR_ERR(g2d->gate_clk); +		goto err_destroy_workqueue; +	} + +	pm_runtime_enable(dev); + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	g2d->regs = devm_ioremap_resource(dev, res); +	if (IS_ERR(g2d->regs)) { +		ret = PTR_ERR(g2d->regs); +		goto err_put_clk; +	} + +	g2d->irq = platform_get_irq(pdev, 0); +	if (g2d->irq < 0) { +		dev_err(dev, "failed to get irq\n"); +		ret = g2d->irq; 
+		goto err_put_clk; +	} + +	ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0, +								"drm_g2d", g2d); +	if (ret < 0) { +		dev_err(dev, "irq request failed\n"); +		goto err_put_clk; +	} + +	g2d->max_pool = MAX_POOL; + +	platform_set_drvdata(pdev, g2d); + +	subdrv = &g2d->subdrv; +	subdrv->dev = dev; +	subdrv->probe = g2d_subdrv_probe; +	subdrv->remove = g2d_subdrv_remove; +	subdrv->open = g2d_open; +	subdrv->close = g2d_close; + +	ret = exynos_drm_subdrv_register(subdrv); +	if (ret < 0) { +		dev_err(dev, "failed to register drm g2d device\n"); +		goto err_put_clk; +	} + +	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n", +			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); + +	return 0; + +err_put_clk: +	pm_runtime_disable(dev); +err_destroy_workqueue: +	destroy_workqueue(g2d->g2d_workq); +err_destroy_slab: +	kmem_cache_destroy(g2d->runqueue_slab); +	return ret; +} + +static int g2d_remove(struct platform_device *pdev) +{ +	struct g2d_data *g2d = platform_get_drvdata(pdev); + +	cancel_work_sync(&g2d->runqueue_work); +	exynos_drm_subdrv_unregister(&g2d->subdrv); + +	while (g2d->runqueue_node) { +		g2d_free_runqueue_node(g2d, g2d->runqueue_node); +		g2d->runqueue_node = g2d_get_runqueue_node(g2d); +	} + +	pm_runtime_disable(&pdev->dev); + +	g2d_fini_cmdlist(g2d); +	destroy_workqueue(g2d->g2d_workq); +	kmem_cache_destroy(g2d->runqueue_slab); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int g2d_suspend(struct device *dev) +{ +	struct g2d_data *g2d = dev_get_drvdata(dev); + +	mutex_lock(&g2d->runqueue_mutex); +	g2d->suspended = true; +	mutex_unlock(&g2d->runqueue_mutex); + +	while (g2d->runqueue_node) +		/* FIXME: good range? 
*/ +		usleep_range(500, 1000); + +	flush_work(&g2d->runqueue_work); + +	return 0; +} + +static int g2d_resume(struct device *dev) +{ +	struct g2d_data *g2d = dev_get_drvdata(dev); + +	g2d->suspended = false; +	g2d_exec_runqueue(g2d); + +	return 0; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int g2d_runtime_suspend(struct device *dev) +{ +	struct g2d_data *g2d = dev_get_drvdata(dev); + +	clk_disable_unprepare(g2d->gate_clk); + +	return 0; +} + +static int g2d_runtime_resume(struct device *dev) +{ +	struct g2d_data *g2d = dev_get_drvdata(dev); +	int ret; + +	ret = clk_prepare_enable(g2d->gate_clk); +	if (ret < 0) +		dev_warn(dev, "failed to enable clock.\n"); + +	return ret; +} +#endif + +static const struct dev_pm_ops g2d_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) +	SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) +}; + +static const struct of_device_id exynos_g2d_match[] = { +	{ .compatible = "samsung,exynos5250-g2d" }, +	{}, +}; + +struct platform_driver g2d_driver = { +	.probe		= g2d_probe, +	.remove		= g2d_remove, +	.driver		= { +		.name	= "s5p-g2d", +		.owner	= THIS_MODULE, +		.pm	= &g2d_pm_ops, +		.of_match_table = exynos_g2d_match, +	}, +}; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h new file mode 100644 index 00000000000..1a9c7ca8c15 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundationr + */ + +#ifdef CONFIG_DRM_EXYNOS_G2D +extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data, +				    struct drm_file *file_priv); +extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data, +					struct drm_file *file_priv); +extern 
int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data, +				 struct drm_file *file_priv); +#else +static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data, +					   struct drm_file *file_priv) +{ +	return -ENODEV; +} + +static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, +					       void *data, +					       struct drm_file *file_priv) +{ +	return -ENODEV; +} + +static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data, +					struct drm_file *file_priv) +{ +	return -ENODEV; +} +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c new file mode 100644 index 00000000000..163a054922c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -0,0 +1,728 @@ +/* exynos_drm_gem.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_vma_manager.h> + +#include <linux/shmem_fs.h> +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_buf.h" +#include "exynos_drm_iommu.h" + +static unsigned int convert_to_vm_err_msg(int msg) +{ +	unsigned int out_msg; + +	switch (msg) { +	case 0: +	case -ERESTARTSYS: +	case -EINTR: +		out_msg = VM_FAULT_NOPAGE; +		break; + +	case -ENOMEM: +		out_msg = VM_FAULT_OOM; +		break; + +	default: +		out_msg = VM_FAULT_SIGBUS; +		break; +	} + +	return out_msg; +} + +static int check_gem_flags(unsigned int flags) +{ +	if (flags & ~(EXYNOS_BO_MASK)) { +		DRM_ERROR("invalid flags.\n"); +		return -EINVAL; +	} + +	return 0; +} + +static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj, +					struct vm_area_struct *vma) +{ +	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags); + +	/* non-cachable as default. */ +	if (obj->flags & EXYNOS_BO_CACHABLE) +		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); +	else if (obj->flags & EXYNOS_BO_WC) +		vma->vm_page_prot = +			pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); +	else +		vma->vm_page_prot = +			pgprot_noncached(vm_get_page_prot(vma->vm_flags)); +} + +static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) +{ +	/* TODO */ + +	return roundup(size, PAGE_SIZE); +} + +static int exynos_drm_gem_map_buf(struct drm_gem_object *obj, +					struct vm_area_struct *vma, +					unsigned long f_vaddr, +					pgoff_t page_offset) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); +	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; +	struct scatterlist *sgl; +	unsigned long pfn; +	int i; + +	if (!buf->sgt) +		return -EINTR; + +	if (page_offset >= (buf->size >> PAGE_SHIFT)) { +		DRM_ERROR("invalid page offset\n"); +		return -EINVAL; +	} + +	sgl = buf->sgt->sgl; +	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) { +		if (page_offset < (sgl->length >> PAGE_SHIFT)) +			break; +		
page_offset -=	(sgl->length >> PAGE_SHIFT); +	} + +	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset; + +	return vm_insert_mixed(vma, f_vaddr, pfn); +} + +static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, +					struct drm_file *file_priv, +					unsigned int *handle) +{ +	int ret; + +	/* +	 * allocate a id of idr table where the obj is registered +	 * and handle has the id what user can see. +	 */ +	ret = drm_gem_handle_create(file_priv, obj, handle); +	if (ret) +		return ret; + +	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); + +	/* drop reference from allocate - handle holds it now. */ +	drm_gem_object_unreference_unlocked(obj); + +	return 0; +} + +void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) +{ +	struct drm_gem_object *obj; +	struct exynos_drm_gem_buf *buf; + +	obj = &exynos_gem_obj->base; +	buf = exynos_gem_obj->buffer; + +	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); + +	/* +	 * do not release memory region from exporter. +	 * +	 * the region will be released by exporter +	 * once dmabuf's refcount becomes 0. +	 */ +	if (obj->import_attach) +		goto out; + +	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); + +out: +	exynos_drm_fini_buf(obj->dev, buf); +	exynos_gem_obj->buffer = NULL; + +	drm_gem_free_mmap_offset(obj); + +	/* release file pointer to gem object. 
*/ +	drm_gem_object_release(obj); + +	kfree(exynos_gem_obj); +	exynos_gem_obj = NULL; +} + +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, +						unsigned int gem_handle, +						struct drm_file *file_priv) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_gem_object *obj; + +	obj = drm_gem_object_lookup(dev, file_priv, gem_handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		return 0; +	} + +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	drm_gem_object_unreference_unlocked(obj); + +	return exynos_gem_obj->buffer->size; +} + + +struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +						      unsigned long size) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_gem_object *obj; +	int ret; + +	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); +	if (!exynos_gem_obj) +		return NULL; + +	exynos_gem_obj->size = size; +	obj = &exynos_gem_obj->base; + +	ret = drm_gem_object_init(dev, obj, size); +	if (ret < 0) { +		DRM_ERROR("failed to initialize gem object\n"); +		kfree(exynos_gem_obj); +		return NULL; +	} + +	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); + +	return exynos_gem_obj; +} + +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, +						unsigned int flags, +						unsigned long size) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct exynos_drm_gem_buf *buf; +	int ret; + +	if (!size) { +		DRM_ERROR("invalid size.\n"); +		return ERR_PTR(-EINVAL); +	} + +	size = roundup_gem_size(size, flags); + +	ret = check_gem_flags(flags); +	if (ret) +		return ERR_PTR(ret); + +	buf = exynos_drm_init_buf(dev, size); +	if (!buf) +		return ERR_PTR(-ENOMEM); + +	exynos_gem_obj = exynos_drm_gem_init(dev, size); +	if (!exynos_gem_obj) { +		ret = -ENOMEM; +		goto err_fini_buf; +	} + +	exynos_gem_obj->buffer = buf; + +	/* set memory type and cache attribute from user side. 
*/ +	exynos_gem_obj->flags = flags; + +	ret = exynos_drm_alloc_buf(dev, buf, flags); +	if (ret < 0) +		goto err_gem_fini; + +	return exynos_gem_obj; + +err_gem_fini: +	drm_gem_object_release(&exynos_gem_obj->base); +	kfree(exynos_gem_obj); +err_fini_buf: +	exynos_drm_fini_buf(dev, buf); +	return ERR_PTR(ret); +} + +int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, +				struct drm_file *file_priv) +{ +	struct drm_exynos_gem_create *args = data; +	struct exynos_drm_gem_obj *exynos_gem_obj; +	int ret; + +	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); +	if (IS_ERR(exynos_gem_obj)) +		return PTR_ERR(exynos_gem_obj); + +	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, +			&args->handle); +	if (ret) { +		exynos_drm_gem_destroy(exynos_gem_obj); +		return ret; +	} + +	return 0; +} + +dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +					unsigned int gem_handle, +					struct drm_file *filp) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_gem_object *obj; + +	obj = drm_gem_object_lookup(dev, filp, gem_handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		return ERR_PTR(-EINVAL); +	} + +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	return &exynos_gem_obj->buffer->dma_addr; +} + +void exynos_drm_gem_put_dma_addr(struct drm_device *dev, +					unsigned int gem_handle, +					struct drm_file *filp) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_gem_object *obj; + +	obj = drm_gem_object_lookup(dev, filp, gem_handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		return; +	} + +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	drm_gem_object_unreference_unlocked(obj); + +	/* +	 * decrease obj->refcount one more time because we has already +	 * increased it at exynos_drm_gem_get_dma_addr(). 
+	 */ +	drm_gem_object_unreference_unlocked(obj); +} + +int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, +				    struct drm_file *file_priv) +{ +	struct drm_exynos_gem_map_off *args = data; + +	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n", +			args->handle, (unsigned long)args->offset); + +	if (!(dev->driver->driver_features & DRIVER_GEM)) { +		DRM_ERROR("does not support GEM.\n"); +		return -ENODEV; +	} + +	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, +			&args->offset); +} + +int exynos_drm_gem_mmap_buffer(struct file *filp, +				      struct vm_area_struct *vma) +{ +	struct drm_gem_object *obj = filp->private_data; +	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); +	struct drm_device *drm_dev = obj->dev; +	struct exynos_drm_gem_buf *buffer; +	unsigned long vm_size; +	int ret; + +	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + +	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; +	vma->vm_private_data = obj; +	vma->vm_ops = drm_dev->driver->gem_vm_ops; + +	update_vm_cache_attr(exynos_gem_obj, vma); + +	vm_size = vma->vm_end - vma->vm_start; + +	/* +	 * a buffer contains information to physically continuous memory +	 * allocated by user request or at framebuffer creation. +	 */ +	buffer = exynos_gem_obj->buffer; + +	/* check if user-requested size is valid. */ +	if (vm_size > buffer->size) +		return -EINVAL; + +	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, +				buffer->dma_addr, buffer->size, +				&buffer->dma_attrs); +	if (ret < 0) { +		DRM_ERROR("failed to mmap.\n"); +		return ret; +	} + +	/* +	 * take a reference to this mapping of the object. And this reference +	 * is unreferenced by the corresponding vm_close call. 
+	 */ +	drm_gem_object_reference(obj); + +	drm_vm_open_locked(drm_dev, vma); + +	return 0; +} + +int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, +			      struct drm_file *file_priv) +{ +	struct drm_exynos_file_private *exynos_file_priv; +	struct drm_exynos_gem_mmap *args = data; +	struct drm_gem_object *obj; +	struct file *anon_filp; +	unsigned long addr; + +	if (!(dev->driver->driver_features & DRIVER_GEM)) { +		DRM_ERROR("does not support GEM.\n"); +		return -ENODEV; +	} + +	mutex_lock(&dev->struct_mutex); + +	obj = drm_gem_object_lookup(dev, file_priv, args->handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		mutex_unlock(&dev->struct_mutex); +		return -EINVAL; +	} + +	exynos_file_priv = file_priv->driver_priv; +	anon_filp = exynos_file_priv->anon_filp; +	anon_filp->private_data = obj; + +	addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE, +			MAP_SHARED, 0); + +	drm_gem_object_unreference(obj); + +	if (IS_ERR_VALUE(addr)) { +		mutex_unlock(&dev->struct_mutex); +		return (int)addr; +	} + +	mutex_unlock(&dev->struct_mutex); + +	args->mapped = addr; + +	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped); + +	return 0; +} + +int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, +				      struct drm_file *file_priv) +{	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_exynos_gem_info *args = data; +	struct drm_gem_object *obj; + +	mutex_lock(&dev->struct_mutex); + +	obj = drm_gem_object_lookup(dev, file_priv, args->handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		mutex_unlock(&dev->struct_mutex); +		return -EINVAL; +	} + +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	args->flags = exynos_gem_obj->flags; +	args->size = exynos_gem_obj->size; + +	drm_gem_object_unreference(obj); +	mutex_unlock(&dev->struct_mutex); + +	return 0; +} + +struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) +{ +	struct vm_area_struct *vma_copy; + +	vma_copy = 
kmalloc(sizeof(*vma_copy), GFP_KERNEL); +	if (!vma_copy) +		return NULL; + +	if (vma->vm_ops && vma->vm_ops->open) +		vma->vm_ops->open(vma); + +	if (vma->vm_file) +		get_file(vma->vm_file); + +	memcpy(vma_copy, vma, sizeof(*vma)); + +	vma_copy->vm_mm = NULL; +	vma_copy->vm_next = NULL; +	vma_copy->vm_prev = NULL; + +	return vma_copy; +} + +void exynos_gem_put_vma(struct vm_area_struct *vma) +{ +	if (!vma) +		return; + +	if (vma->vm_ops && vma->vm_ops->close) +		vma->vm_ops->close(vma); + +	if (vma->vm_file) +		fput(vma->vm_file); + +	kfree(vma); +} + +int exynos_gem_get_pages_from_userptr(unsigned long start, +						unsigned int npages, +						struct page **pages, +						struct vm_area_struct *vma) +{ +	int get_npages; + +	/* the memory region mmaped with VM_PFNMAP. */ +	if (vma_is_io(vma)) { +		unsigned int i; + +		for (i = 0; i < npages; ++i, start += PAGE_SIZE) { +			unsigned long pfn; +			int ret = follow_pfn(vma, start, &pfn); +			if (ret) +				return ret; + +			pages[i] = pfn_to_page(pfn); +		} + +		if (i != npages) { +			DRM_ERROR("failed to get user_pages.\n"); +			return -EINVAL; +		} + +		return 0; +	} + +	get_npages = get_user_pages(current, current->mm, start, +					npages, 1, 1, pages, NULL); +	get_npages = max(get_npages, 0); +	if (get_npages != npages) { +		DRM_ERROR("failed to get user_pages.\n"); +		while (get_npages) +			put_page(pages[--get_npages]); +		return -EFAULT; +	} + +	return 0; +} + +void exynos_gem_put_pages_to_userptr(struct page **pages, +					unsigned int npages, +					struct vm_area_struct *vma) +{ +	if (!vma_is_io(vma)) { +		unsigned int i; + +		for (i = 0; i < npages; i++) { +			set_page_dirty_lock(pages[i]); + +			/* +			 * undo the reference we took when populating +			 * the table. 
+			 */ +			put_page(pages[i]); +		} +	} +} + +int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, +				struct sg_table *sgt, +				enum dma_data_direction dir) +{ +	int nents; + +	mutex_lock(&drm_dev->struct_mutex); + +	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); +	if (!nents) { +		DRM_ERROR("failed to map sgl with dma.\n"); +		mutex_unlock(&drm_dev->struct_mutex); +		return nents; +	} + +	mutex_unlock(&drm_dev->struct_mutex); +	return 0; +} + +void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, +				struct sg_table *sgt, +				enum dma_data_direction dir) +{ +	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); +} + +void exynos_drm_gem_free_object(struct drm_gem_object *obj) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct exynos_drm_gem_buf *buf; + +	exynos_gem_obj = to_exynos_gem_obj(obj); +	buf = exynos_gem_obj->buffer; + +	if (obj->import_attach) +		drm_prime_gem_destroy(obj, buf->sgt); + +	exynos_drm_gem_destroy(to_exynos_gem_obj(obj)); +} + +int exynos_drm_gem_dumb_create(struct drm_file *file_priv, +			       struct drm_device *dev, +			       struct drm_mode_create_dumb *args) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	int ret; + +	/* +	 * allocate memory to be used for framebuffer. +	 * - this callback would be called by user application +	 *	with DRM_IOCTL_MODE_CREATE_DUMB command. 
+	 */ + +	args->pitch = args->width * ((args->bpp + 7) / 8); +	args->size = args->pitch * args->height; + +	if (is_drm_iommu_supported(dev)) { +		exynos_gem_obj = exynos_drm_gem_create(dev, +			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, +			args->size); +	} else { +		exynos_gem_obj = exynos_drm_gem_create(dev, +			EXYNOS_BO_CONTIG | EXYNOS_BO_WC, +			args->size); +	} + +	if (IS_ERR(exynos_gem_obj)) { +		dev_warn(dev->dev, "FB allocation failed.\n"); +		return PTR_ERR(exynos_gem_obj); +	} + +	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, +			&args->handle); +	if (ret) { +		exynos_drm_gem_destroy(exynos_gem_obj); +		return ret; +	} + +	return 0; +} + +int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, +				   struct drm_device *dev, uint32_t handle, +				   uint64_t *offset) +{ +	struct drm_gem_object *obj; +	int ret = 0; + +	mutex_lock(&dev->struct_mutex); + +	/* +	 * get offset of memory allocated for drm framebuffer. +	 * - this callback would be called by user application +	 *	with DRM_IOCTL_MODE_MAP_DUMB command. 
+	 */ + +	obj = drm_gem_object_lookup(dev, file_priv, handle); +	if (!obj) { +		DRM_ERROR("failed to lookup gem object.\n"); +		ret = -EINVAL; +		goto unlock; +	} + +	ret = drm_gem_create_mmap_offset(obj); +	if (ret) +		goto out; + +	*offset = drm_vma_node_offset_addr(&obj->vma_node); +	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); + +out: +	drm_gem_object_unreference(obj); +unlock: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ +	struct drm_gem_object *obj = vma->vm_private_data; +	struct drm_device *dev = obj->dev; +	unsigned long f_vaddr; +	pgoff_t page_offset; +	int ret; + +	page_offset = ((unsigned long)vmf->virtual_address - +			vma->vm_start) >> PAGE_SHIFT; +	f_vaddr = (unsigned long)vmf->virtual_address; + +	mutex_lock(&dev->struct_mutex); + +	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); +	if (ret < 0) +		DRM_ERROR("failed to map a buffer with user.\n"); + +	mutex_unlock(&dev->struct_mutex); + +	return convert_to_vm_err_msg(ret); +} + +int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ +	struct exynos_drm_gem_obj *exynos_gem_obj; +	struct drm_gem_object *obj; +	int ret; + +	/* set vm_area_struct. 
*/ +	ret = drm_gem_mmap(filp, vma); +	if (ret < 0) { +		DRM_ERROR("failed to mmap.\n"); +		return ret; +	} + +	obj = vma->vm_private_data; +	exynos_gem_obj = to_exynos_gem_obj(obj); + +	ret = check_gem_flags(exynos_gem_obj->flags); +	if (ret) { +		drm_gem_vm_close(vma); +		drm_gem_free_mmap_offset(obj); +		return ret; +	} + +	vma->vm_flags &= ~VM_PFNMAP; +	vma->vm_flags |= VM_MIXEDMAP; + +	update_vm_cache_attr(exynos_gem_obj, vma); + +	return ret; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h new file mode 100644 index 00000000000..1592c0ba7de --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -0,0 +1,192 @@ +/* exynos_drm_gem.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_GEM_H_ +#define _EXYNOS_DRM_GEM_H_ + +#define to_exynos_gem_obj(x)	container_of(x,\ +			struct exynos_drm_gem_obj, base) + +#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG) + +/* + * exynos drm gem buffer structure. + * + * @kvaddr: kernel virtual address to allocated memory region. + * @userptr: user space address. + * @dma_addr: bus address(accessed by dma) to allocated memory region. + *	- this address could be physical address without IOMMU and + *	device address with IOMMU. + * @write: whether pages will be written to by the caller. + * @pages: Array of backing pages. + * @sgt: sg table to transfer page data. + * @size: size of allocated memory region. + * @pfnmap: indicate whether memory region from userptr is mmaped with + *	VM_PFNMAP or not. 
+ */ +struct exynos_drm_gem_buf { +	void __iomem		*kvaddr; +	unsigned long		userptr; +	dma_addr_t		dma_addr; +	struct dma_attrs	dma_attrs; +	unsigned int		write; +	struct page		**pages; +	struct sg_table		*sgt; +	unsigned long		size; +	bool			pfnmap; +}; + +/* + * exynos drm buffer structure. + * + * @base: a gem object. + *	- a new handle to this gem object would be created + *	by drm_gem_handle_create(). + * @buffer: a pointer to exynos_drm_gem_buffer object. + *	- contain the information to memory region allocated + *	by user request or at framebuffer creation. + *	continuous memory region allocated by user request + *	or at framebuffer creation. + * @size: size requested from user, in bytes and this size is aligned + *	in page unit. + * @vma: a pointer to vm_area. + * @flags: indicate memory type to allocated buffer and cache attribute. + * + * P.S. this object would be transferred to user as kms_bo.handle so + *	user can access the buffer through kms_bo.handle. + */ +struct exynos_drm_gem_obj { +	struct drm_gem_object		base; +	struct exynos_drm_gem_buf	*buffer; +	unsigned long			size; +	struct vm_area_struct		*vma; +	unsigned int			flags; +}; + +struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); + +/* destroy a buffer with gem object */ +void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); + +/* create a private gem object and initialize it. */ +struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +						      unsigned long size); + +/* create a new buffer with gem object */ +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, +						unsigned int flags, +						unsigned long size); + +/* + * request gem object creation and buffer allocation as the size + * that it is calculated with framebuffer information such as width, + * height and bpp. 
+ */ +int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, +				struct drm_file *file_priv); + +/* + * get dma address from gem handle and this function could be used for + * other drivers such as 2d/3d acceleration drivers. + * with this function call, gem object reference count would be increased. + */ +dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, +					unsigned int gem_handle, +					struct drm_file *filp); + +/* + * put dma address from gem handle and this function could be used for + * other drivers such as 2d/3d acceleration drivers. + * with this function call, gem object reference count would be decreased. + */ +void exynos_drm_gem_put_dma_addr(struct drm_device *dev, +					unsigned int gem_handle, +					struct drm_file *filp); + +/* get buffer offset to map to user space. */ +int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, +				    struct drm_file *file_priv); + +/* + * mmap the physically continuous memory that a gem object contains + * to user space. + */ +int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, +			      struct drm_file *file_priv); + +int exynos_drm_gem_mmap_buffer(struct file *filp, +				      struct vm_area_struct *vma); + +/* map user space allocated by malloc to pages. */ +int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, +				      struct drm_file *file_priv); + +/* get buffer information to memory region allocated by gem. */ +int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, +				      struct drm_file *file_priv); + +/* get buffer size to gem handle. */ +unsigned long exynos_drm_gem_get_size(struct drm_device *dev, +						unsigned int gem_handle, +						struct drm_file *file_priv); + +/* free gem object. */ +void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj); + +/* create memory region for drm framebuffer. 
*/ +int exynos_drm_gem_dumb_create(struct drm_file *file_priv, +			       struct drm_device *dev, +			       struct drm_mode_create_dumb *args); + +/* map memory region for drm framebuffer to user space. */ +int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, +				   struct drm_device *dev, uint32_t handle, +				   uint64_t *offset); + +/* page fault handler and mmap fault address(virtual) to physical memory. */ +int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); + +/* set vm_flags and we can change the vm attribute to other one at here. */ +int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); + +static inline int vma_is_io(struct vm_area_struct *vma) +{ +	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); +} + +/* get a copy of a virtual memory region. */ +struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma); + +/* release a userspace virtual memory area. */ +void exynos_gem_put_vma(struct vm_area_struct *vma); + +/* get pages from user space. */ +int exynos_gem_get_pages_from_userptr(unsigned long start, +						unsigned int npages, +						struct page **pages, +						struct vm_area_struct *vma); + +/* drop the reference to pages. */ +void exynos_gem_put_pages_to_userptr(struct page **pages, +					unsigned int npages, +					struct vm_area_struct *vma); + +/* map sgt with dma region. */ +int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev, +				struct sg_table *sgt, +				enum dma_data_direction dir); + +/* unmap sgt from dma region. 
*/ +void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev, +				struct sg_table *sgt, +				enum dma_data_direction dir); + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c new file mode 100644 index 00000000000..9e3ff167296 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -0,0 +1,1802 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee <lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> +#include <plat/map-base.h> + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "regs-gsc.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_ipp.h" +#include "exynos_drm_gsc.h" + +/* + * GSC stands for General SCaler and + * supports image scaler/rotator and input/output DMA operations. + * input DMA reads image data from the memory. + * output DMA writes image data to memory. + * GSC supports image rotation and image effect functions. + * + * M2M operation : supports crop/scale/rotation/csc so on. + * Memory ----> GSC H/W ----> Memory. + * Writeback operation : supports cloned screen with FIMD. + * FIMD ----> GSC H/W ----> Memory. + * Output operation : supports direct display using local path. + * Memory ----> GSC H/W ----> FIMD, Mixer. + */ + +/* + * TODO + * 1. check suspend/resume api if needed. + * 2. need to check use case platform_device_id. + * 3. check src/dst size with, height. + * 4. added check_prepare api for right register. + * 5. need to add supported list in prop_list. + * 6. 
check prescaler/scaler optimization. + */ + +#define GSC_MAX_DEVS	4 +#define GSC_MAX_SRC		4 +#define GSC_MAX_DST		16 +#define GSC_RESET_TIMEOUT	50 +#define GSC_BUF_STOP	1 +#define GSC_BUF_START	2 +#define GSC_REG_SZ		16 +#define GSC_WIDTH_ITU_709	1280 +#define GSC_SC_UP_MAX_RATIO		65536 +#define GSC_SC_DOWN_RATIO_7_8		74898 +#define GSC_SC_DOWN_RATIO_6_8		87381 +#define GSC_SC_DOWN_RATIO_5_8		104857 +#define GSC_SC_DOWN_RATIO_4_8		131072 +#define GSC_SC_DOWN_RATIO_3_8		174762 +#define GSC_SC_DOWN_RATIO_2_8		262144 +#define GSC_REFRESH_MIN	12 +#define GSC_REFRESH_MAX	60 +#define GSC_CROP_MAX	8192 +#define GSC_CROP_MIN	32 +#define GSC_SCALE_MAX	4224 +#define GSC_SCALE_MIN	32 +#define GSC_COEF_RATIO	7 +#define GSC_COEF_PHASE	9 +#define GSC_COEF_ATTR	16 +#define GSC_COEF_H_8T	8 +#define GSC_COEF_V_4T	4 +#define GSC_COEF_DEPTH	3 + +#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev)) +#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\ +					struct gsc_context, ippdrv); +#define gsc_read(offset)		readl(ctx->regs + (offset)) +#define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset)) + +/* + * A structure of scaler. + * + * @range: narrow, wide. + * @pre_shfactor: pre-scaler shift factor. + * @pre_hratio: horizontal ratio of the prescaler. + * @pre_vratio: vertical ratio of the prescaler. + * @main_hratio: the main scaler's horizontal ratio. + * @main_vratio: the main scaler's vertical ratio. + */ +struct gsc_scaler { +	bool	range; +	u32	pre_shfactor; +	u32	pre_hratio; +	u32	pre_vratio; +	unsigned long main_hratio; +	unsigned long main_vratio; +}; + +/* + * A structure of scaler capability. + * + * find user manual 49.2 features. + * @tile_w: tile mode or rotation width. + * @tile_h: tile mode or rotation height. + * @w: other cases width. + * @h: other cases height. + */ +struct gsc_capability { +	/* tile or rotation */ +	u32	tile_w; +	u32	tile_h; +	/* other cases */ +	u32	w; +	u32	h; +}; + +/* + * A structure of gsc context. 
+ * + * @ippdrv: prepare initialization using ippdrv. + * @regs_res: register resources. + * @regs: memory mapped io registers. + * @lock: locking of operations. + * @gsc_clk: gsc gate clock. + * @sc: scaler infomations. + * @id: gsc id. + * @irq: irq number. + * @rotation: supports rotation of src. + * @suspended: qos operations. + */ +struct gsc_context { +	struct exynos_drm_ippdrv	ippdrv; +	struct resource	*regs_res; +	void __iomem	*regs; +	struct mutex	lock; +	struct clk	*gsc_clk; +	struct gsc_scaler	sc; +	int	id; +	int	irq; +	bool	rotation; +	bool	suspended; +}; + +/* 8-tap Filter Coefficient */ +static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = { +	{	/* Ratio <= 65536 (~8:8) */ +		{  0,  0,   0, 128,   0,   0,  0,  0 }, +		{ -1,  2,  -6, 127,   7,  -2,  1,  0 }, +		{ -1,  4, -12, 125,  16,  -5,  1,  0 }, +		{ -1,  5, -15, 120,  25,  -8,  2,  0 }, +		{ -1,  6, -18, 114,  35, -10,  3, -1 }, +		{ -1,  6, -20, 107,  46, -13,  4, -1 }, +		{ -2,  7, -21,  99,  57, -16,  5, -1 }, +		{ -1,  6, -20,  89,  68, -18,  5, -1 }, +		{ -1,  6, -20,  79,  79, -20,  6, -1 }, +		{ -1,  5, -18,  68,  89, -20,  6, -1 }, +		{ -1,  5, -16,  57,  99, -21,  7, -2 }, +		{ -1,  4, -13,  46, 107, -20,  6, -1 }, +		{ -1,  3, -10,  35, 114, -18,  6, -1 }, +		{  0,  2,  -8,  25, 120, -15,  5, -1 }, +		{  0,  1,  -5,  16, 125, -12,  4, -1 }, +		{  0,  1,  -2,   7, 127,  -6,  2, -1 } +	}, {	/* 65536 < Ratio <= 74898 (~8:7) */ +		{  3, -8,  14, 111,  13,  -8,  3,  0 }, +		{  2, -6,   7, 112,  21, -10,  3, -1 }, +		{  2, -4,   1, 110,  28, -12,  4, -1 }, +		{  1, -2,  -3, 106,  36, -13,  4, -1 }, +		{  1, -1,  -7, 103,  44, -15,  4, -1 }, +		{  1,  1, -11,  97,  53, -16,  4, -1 }, +		{  0,  2, -13,  91,  61, -16,  4, -1 }, +		{  0,  3, -15,  85,  69, -17,  4, -1 }, +		{  0,  3, -16,  77,  77, -16,  3,  0 }, +		{ -1,  4, -17,  69,  85, -15,  3,  0 }, +		{ -1,  4, -16,  61,  91, -13,  2,  0 }, +		{ -1,  4, -16,  53,  97, -11,  1,  1 }, +		{ -1,  4, -15,  44, 103,  -7, 
-1,  1 }, +		{ -1,  4, -13,  36, 106,  -3, -2,  1 }, +		{ -1,  4, -12,  28, 110,   1, -4,  2 }, +		{ -1,  3, -10,  21, 112,   7, -6,  2 } +	}, {	/* 74898 < Ratio <= 87381 (~8:6) */ +		{ 2, -11,  25,  96, 25, -11,   2,  0 }, +		{ 2, -10,  19,  96, 31, -12,   2,  0 }, +		{ 2,  -9,  14,  94, 37, -12,   2,  0 }, +		{ 2,  -8,  10,  92, 43, -12,   1,  0 }, +		{ 2,  -7,   5,  90, 49, -12,   1,  0 }, +		{ 2,  -5,   1,  86, 55, -12,   0,  1 }, +		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 }, +		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 }, +		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 }, +		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 }, +		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 }, +		{ 1,   0, -12,  55, 86,   1,  -5,  2 }, +		{ 0,   1, -12,  49, 90,   5,  -7,  2 }, +		{ 0,   1, -12,  43, 92,  10,  -8,  2 }, +		{ 0,   2, -12,  37, 94,  14,  -9,  2 }, +		{ 0,   2, -12,  31, 96,  19, -10,  2 } +	}, {	/* 87381 < Ratio <= 104857 (~8:5) */ +		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 }, +		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 }, +		{  0,  -8, 24,  79, 41,  -7,  -2,  1 }, +		{  0,  -8, 20,  78, 46,  -6,  -3,  1 }, +		{  0,  -8, 16,  76, 50,  -4,  -3,  1 }, +		{  0,  -7, 13,  74, 54,  -3,  -4,  1 }, +		{  1,  -7, 10,  71, 58,  -1,  -5,  1 }, +		{  1,  -6,  6,  68, 62,   1,  -5,  1 }, +		{  1,  -6,  4,  65, 65,   4,  -6,  1 }, +		{  1,  -5,  1,  62, 68,   6,  -6,  1 }, +		{  1,  -5, -1,  58, 71,  10,  -7,  1 }, +		{  1,  -4, -3,  54, 74,  13,  -7,  0 }, +		{  1,  -3, -4,  50, 76,  16,  -8,  0 }, +		{  1,  -3, -6,  46, 78,  20,  -8,  0 }, +		{  1,  -2, -7,  41, 79,  24,  -8,  0 }, +		{  1,  -2, -7,  37, 80,  28,  -8, -1 } +	}, {	/* 104857 < Ratio <= 131072 (~8:4) */ +		{ -3,   0, 35,  64, 35,   0,  -3,  0 }, +		{ -3,  -1, 32,  64, 38,   1,  -3,  0 }, +		{ -2,  -2, 29,  63, 41,   2,  -3,  0 }, +		{ -2,  -3, 27,  63, 43,   4,  -4,  0 }, +		{ -2,  -3, 24,  61, 46,   6,  -4,  0 }, +		{ -2,  -3, 21,  60, 49,   7,  -4,  0 }, +		{ -1,  -4, 19,  59, 51,   9,  -4, -1 }, +		{ -1,  -4, 16,  57, 53,  12,  -4, -1 }, +		{ -1,  
-4, 14,  55, 55,  14,  -4, -1 }, +		{ -1,  -4, 12,  53, 57,  16,  -4, -1 }, +		{ -1,  -4,  9,  51, 59,  19,  -4, -1 }, +		{  0,  -4,  7,  49, 60,  21,  -3, -2 }, +		{  0,  -4,  6,  46, 61,  24,  -3, -2 }, +		{  0,  -4,  4,  43, 63,  27,  -3, -2 }, +		{  0,  -3,  2,  41, 63,  29,  -2, -2 }, +		{  0,  -3,  1,  38, 64,  32,  -1, -3 } +	}, {	/* 131072 < Ratio <= 174762 (~8:3) */ +		{ -1,   8, 33,  48, 33,   8,  -1,  0 }, +		{ -1,   7, 31,  49, 35,   9,  -1, -1 }, +		{ -1,   6, 30,  49, 36,  10,  -1, -1 }, +		{ -1,   5, 28,  48, 38,  12,  -1, -1 }, +		{ -1,   4, 26,  48, 39,  13,   0, -1 }, +		{ -1,   3, 24,  47, 41,  15,   0, -1 }, +		{ -1,   2, 23,  47, 42,  16,   0, -1 }, +		{ -1,   2, 21,  45, 43,  18,   1, -1 }, +		{ -1,   1, 19,  45, 45,  19,   1, -1 }, +		{ -1,   1, 18,  43, 45,  21,   2, -1 }, +		{ -1,   0, 16,  42, 47,  23,   2, -1 }, +		{ -1,   0, 15,  41, 47,  24,   3, -1 }, +		{ -1,   0, 13,  39, 48,  26,   4, -1 }, +		{ -1,  -1, 12,  38, 48,  28,   5, -1 }, +		{ -1,  -1, 10,  36, 49,  30,   6, -1 }, +		{ -1,  -1,  9,  35, 49,  31,   7, -1 } +	}, {	/* 174762 < Ratio <= 262144 (~8:2) */ +		{  2,  13, 30,  38, 30,  13,   2,  0 }, +		{  2,  12, 29,  38, 30,  14,   3,  0 }, +		{  2,  11, 28,  38, 31,  15,   3,  0 }, +		{  2,  10, 26,  38, 32,  16,   4,  0 }, +		{  1,  10, 26,  37, 33,  17,   4,  0 }, +		{  1,   9, 24,  37, 34,  18,   5,  0 }, +		{  1,   8, 24,  37, 34,  19,   5,  0 }, +		{  1,   7, 22,  36, 35,  20,   6,  1 }, +		{  1,   6, 21,  36, 36,  21,   6,  1 }, +		{  1,   6, 20,  35, 36,  22,   7,  1 }, +		{  0,   5, 19,  34, 37,  24,   8,  1 }, +		{  0,   5, 18,  34, 37,  24,   9,  1 }, +		{  0,   4, 17,  33, 37,  26,  10,  1 }, +		{  0,   4, 16,  32, 38,  26,  10,  2 }, +		{  0,   3, 15,  31, 38,  28,  11,  2 }, +		{  0,   3, 14,  30, 38,  29,  12,  2 } +	} +}; + +/* 4-tap Filter Coefficient */ +static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = { +	{	/* Ratio <= 65536 (~8:8) */ +		{  0, 128,   0,  0 }, +		{ -4, 127,   5,  0 }, 
+		{ -6, 124,  11, -1 }, +		{ -8, 118,  19, -1 }, +		{ -8, 111,  27, -2 }, +		{ -8, 102,  37, -3 }, +		{ -8,  92,  48, -4 }, +		{ -7,  81,  59, -5 }, +		{ -6,  70,  70, -6 }, +		{ -5,  59,  81, -7 }, +		{ -4,  48,  92, -8 }, +		{ -3,  37, 102, -8 }, +		{ -2,  27, 111, -8 }, +		{ -1,  19, 118, -8 }, +		{ -1,  11, 124, -6 }, +		{  0,   5, 127, -4 } +	}, {	/* 65536 < Ratio <= 74898 (~8:7) */ +		{  8, 112,   8,  0 }, +		{  4, 111,  14, -1 }, +		{  1, 109,  20, -2 }, +		{ -2, 105,  27, -2 }, +		{ -3, 100,  34, -3 }, +		{ -5,  93,  43, -3 }, +		{ -5,  86,  51, -4 }, +		{ -5,  77,  60, -4 }, +		{ -5,  69,  69, -5 }, +		{ -4,  60,  77, -5 }, +		{ -4,  51,  86, -5 }, +		{ -3,  43,  93, -5 }, +		{ -3,  34, 100, -3 }, +		{ -2,  27, 105, -2 }, +		{ -2,  20, 109,  1 }, +		{ -1,  14, 111,  4 } +	}, {	/* 74898 < Ratio <= 87381 (~8:6) */ +		{ 16,  96,  16,  0 }, +		{ 12,  97,  21, -2 }, +		{  8,  96,  26, -2 }, +		{  5,  93,  32, -2 }, +		{  2,  89,  39, -2 }, +		{  0,  84,  46, -2 }, +		{ -1,  79,  53, -3 }, +		{ -2,  73,  59, -2 }, +		{ -2,  66,  66, -2 }, +		{ -2,  59,  73, -2 }, +		{ -3,  53,  79, -1 }, +		{ -2,  46,  84,  0 }, +		{ -2,  39,  89,  2 }, +		{ -2,  32,  93,  5 }, +		{ -2,  26,  96,  8 }, +		{ -2,  21,  97, 12 } +	}, {	/* 87381 < Ratio <= 104857 (~8:5) */ +		{ 22,  84,  22,  0 }, +		{ 18,  85,  26, -1 }, +		{ 14,  84,  31, -1 }, +		{ 11,  82,  36, -1 }, +		{  8,  79,  42, -1 }, +		{  6,  76,  47, -1 }, +		{  4,  72,  52,  0 }, +		{  2,  68,  58,  0 }, +		{  1,  63,  63,  1 }, +		{  0,  58,  68,  2 }, +		{  0,  52,  72,  4 }, +		{ -1,  47,  76,  6 }, +		{ -1,  42,  79,  8 }, +		{ -1,  36,  82, 11 }, +		{ -1,  31,  84, 14 }, +		{ -1,  26,  85, 18 } +	}, {	/* 104857 < Ratio <= 131072 (~8:4) */ +		{ 26,  76,  26,  0 }, +		{ 22,  76,  30,  0 }, +		{ 19,  75,  34,  0 }, +		{ 16,  73,  38,  1 }, +		{ 13,  71,  43,  1 }, +		{ 10,  69,  47,  2 }, +		{  8,  66,  51,  3 }, +		{  6,  63,  55,  4 }, +		{  5,  59,  59,  5 }, +		{  4,  55,  63,  6 }, +		{  3,  51,  66,  8 }, +		
{  2,  47,  69, 10 }, +		{  1,  43,  71, 13 }, +		{  1,  38,  73, 16 }, +		{  0,  34,  75, 19 }, +		{  0,  30,  76, 22 } +	}, {	/* 131072 < Ratio <= 174762 (~8:3) */ +		{ 29,  70,  29,  0 }, +		{ 26,  68,  32,  2 }, +		{ 23,  67,  36,  2 }, +		{ 20,  66,  39,  3 }, +		{ 17,  65,  43,  3 }, +		{ 15,  63,  46,  4 }, +		{ 12,  61,  50,  5 }, +		{ 10,  58,  53,  7 }, +		{  8,  56,  56,  8 }, +		{  7,  53,  58, 10 }, +		{  5,  50,  61, 12 }, +		{  4,  46,  63, 15 }, +		{  3,  43,  65, 17 }, +		{  3,  39,  66, 20 }, +		{  2,  36,  67, 23 }, +		{  2,  32,  68, 26 } +	}, {	/* 174762 < Ratio <= 262144 (~8:2) */ +		{ 32,  64,  32,  0 }, +		{ 28,  63,  34,  3 }, +		{ 25,  62,  37,  4 }, +		{ 22,  62,  40,  4 }, +		{ 19,  61,  43,  5 }, +		{ 17,  59,  46,  6 }, +		{ 15,  58,  48,  7 }, +		{ 13,  55,  51,  9 }, +		{ 11,  53,  53, 11 }, +		{  9,  51,  55, 13 }, +		{  7,  48,  58, 15 }, +		{  6,  46,  59, 17 }, +		{  5,  43,  61, 19 }, +		{  4,  40,  62, 22 }, +		{  4,  37,  62, 25 }, +		{  3,  34,  63, 28 } +	} +}; + +static int gsc_sw_reset(struct gsc_context *ctx) +{ +	u32 cfg; +	int count = GSC_RESET_TIMEOUT; + +	/* s/w reset */ +	cfg = (GSC_SW_RESET_SRESET); +	gsc_write(cfg, GSC_SW_RESET); + +	/* wait s/w reset complete */ +	while (count--) { +		cfg = gsc_read(GSC_SW_RESET); +		if (!cfg) +			break; +		usleep_range(1000, 2000); +	} + +	if (cfg) { +		DRM_ERROR("failed to reset gsc h/w.\n"); +		return -EBUSY; +	} + +	/* reset sequence */ +	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); +	cfg |= (GSC_IN_BASE_ADDR_MASK | +		GSC_IN_BASE_ADDR_PINGPONG(0)); +	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK); +	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK); +	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK); + +	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); +	cfg |= (GSC_OUT_BASE_ADDR_MASK | +		GSC_OUT_BASE_ADDR_PINGPONG(0)); +	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK); +	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK); +	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK); + +	return 0; +} + +static void 
gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) +{ +	u32 gscblk_cfg; + +	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1); + +	if (enable) +		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | +				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) | +				GSC_BLK_SW_RESET_WB_DEST(ctx->id); +	else +		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); + +	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1); +} + +static void gsc_handle_irq(struct gsc_context *ctx, bool enable, +		bool overflow, bool done) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n", +			enable, overflow, done); + +	cfg = gsc_read(GSC_IRQ); +	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK); + +	if (enable) +		cfg |= GSC_IRQ_ENABLE; +	else +		cfg &= ~GSC_IRQ_ENABLE; + +	if (overflow) +		cfg &= ~GSC_IRQ_OR_MASK; +	else +		cfg |= GSC_IRQ_OR_MASK; + +	if (done) +		cfg &= ~GSC_IRQ_FRMDONE_MASK; +	else +		cfg |= GSC_IRQ_FRMDONE_MASK; + +	gsc_write(cfg, GSC_IRQ); +} + + +static int gsc_src_set_fmt(struct device *dev, u32 fmt) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	cfg = gsc_read(GSC_IN_CON); +	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK | +		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK | +		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE | +		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK); + +	switch (fmt) { +	case DRM_FORMAT_RGB565: +		cfg |= GSC_IN_RGB565; +		break; +	case DRM_FORMAT_XRGB8888: +		cfg |= GSC_IN_XRGB8888; +		break; +	case DRM_FORMAT_BGRX8888: +		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP); +		break; +	case DRM_FORMAT_YUYV: +		cfg |= (GSC_IN_YUV422_1P | +			GSC_IN_YUV422_1P_ORDER_LSB_Y | +			GSC_IN_CHROMA_ORDER_CBCR); +		break; +	case DRM_FORMAT_YVYU: +		cfg |= (GSC_IN_YUV422_1P | +			GSC_IN_YUV422_1P_ORDER_LSB_Y | +			GSC_IN_CHROMA_ORDER_CRCB); +		break; +	case DRM_FORMAT_UYVY: +		cfg |= (GSC_IN_YUV422_1P | +			GSC_IN_YUV422_1P_OEDER_LSB_C | +			
GSC_IN_CHROMA_ORDER_CBCR); +		break; +	case DRM_FORMAT_VYUY: +		cfg |= (GSC_IN_YUV422_1P | +			GSC_IN_YUV422_1P_OEDER_LSB_C | +			GSC_IN_CHROMA_ORDER_CRCB); +		break; +	case DRM_FORMAT_NV21: +	case DRM_FORMAT_NV61: +		cfg |= (GSC_IN_CHROMA_ORDER_CRCB | +			GSC_IN_YUV420_2P); +		break; +	case DRM_FORMAT_YUV422: +		cfg |= GSC_IN_YUV422_3P; +		break; +	case DRM_FORMAT_YUV420: +	case DRM_FORMAT_YVU420: +		cfg |= GSC_IN_YUV420_3P; +		break; +	case DRM_FORMAT_NV12: +	case DRM_FORMAT_NV16: +		cfg |= (GSC_IN_CHROMA_ORDER_CBCR | +			GSC_IN_YUV420_2P); +		break; +	case DRM_FORMAT_NV12MT: +		cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE); +		break; +	default: +		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); +		return -EINVAL; +	} + +	gsc_write(cfg, GSC_IN_CON); + +	return 0; +} + +static int gsc_src_set_transf(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + +	cfg = gsc_read(GSC_IN_CON); +	cfg &= ~GSC_IN_ROT_MASK; + +	switch (degree) { +	case EXYNOS_DRM_DEGREE_0: +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= GSC_IN_ROT_XFLIP; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= GSC_IN_ROT_YFLIP; +		break; +	case EXYNOS_DRM_DEGREE_90: +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= GSC_IN_ROT_90_XFLIP; +		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= GSC_IN_ROT_90_YFLIP; +		else +			cfg |= GSC_IN_ROT_90; +		break; +	case EXYNOS_DRM_DEGREE_180: +		cfg |= GSC_IN_ROT_180; +		break; +	case EXYNOS_DRM_DEGREE_270: +		cfg |= GSC_IN_ROT_270; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); +		return -EINVAL; +	} + +	gsc_write(cfg, GSC_IN_CON); + +	ctx->rotation = cfg & +		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 
1 : 0; +	*swap = ctx->rotation; + +	return 0; +} + +static int gsc_src_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct drm_exynos_pos img_pos = *pos; +	struct gsc_scaler *sc = &ctx->sc; +	u32 cfg; + +	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", +		swap, pos->x, pos->y, pos->w, pos->h); + +	if (swap) { +		img_pos.w = pos->h; +		img_pos.h = pos->w; +	} + +	/* pixel offset */ +	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) | +		GSC_SRCIMG_OFFSET_Y(img_pos.y)); +	gsc_write(cfg, GSC_SRCIMG_OFFSET); + +	/* cropped size */ +	cfg = (GSC_CROPPED_WIDTH(img_pos.w) | +		GSC_CROPPED_HEIGHT(img_pos.h)); +	gsc_write(cfg, GSC_CROPPED_SIZE); + +	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); + +	/* original size */ +	cfg = gsc_read(GSC_SRCIMG_SIZE); +	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | +		GSC_SRCIMG_WIDTH_MASK); + +	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) | +		GSC_SRCIMG_HEIGHT(sz->vsize)); + +	gsc_write(cfg, GSC_SRCIMG_SIZE); + +	cfg = gsc_read(GSC_IN_CON); +	cfg &= ~GSC_IN_RGB_TYPE_MASK; + +	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); + +	if (pos->w >= GSC_WIDTH_ITU_709) +		if (sc->range) +			cfg |= GSC_IN_RGB_HD_WIDE; +		else +			cfg |= GSC_IN_RGB_HD_NARROW; +	else +		if (sc->range) +			cfg |= GSC_IN_RGB_SD_WIDE; +		else +			cfg |= GSC_IN_RGB_SD_NARROW; + +	gsc_write(cfg, GSC_IN_CON); + +	return 0; +} + +static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, +		enum drm_exynos_ipp_buf_type buf_type) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	bool masked; +	u32 cfg; +	u32 mask = 0x00000001 << buf_id; + +	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); + +	/* mask register set */ +	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); + +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		masked = false; +		break; +	case IPP_BUF_DEQUEUE: +		masked = true; +		break; +	default: +		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); 
+		return -EINVAL; +	} + +	/* sequence id */ +	cfg &= ~mask; +	cfg |= masked << buf_id; +	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK); +	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK); +	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK); + +	return 0; +} + +static int gsc_src_set_addr(struct device *dev, +		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, +		enum drm_exynos_ipp_buf_type buf_type) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_property *property; + +	if (!c_node) { +		DRM_ERROR("failed to get c_node.\n"); +		return -EFAULT; +	} + +	property = &c_node->property; + +	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", +		property->prop_id, buf_id, buf_type); + +	if (buf_id > GSC_MAX_SRC) { +		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); +		return -EINVAL; +	} + +	/* address register set */ +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], +			GSC_IN_BASE_ADDR_Y(buf_id)); +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], +			GSC_IN_BASE_ADDR_CB(buf_id)); +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], +			GSC_IN_BASE_ADDR_CR(buf_id)); +		break; +	case IPP_BUF_DEQUEUE: +		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id)); +		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id)); +		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id)); +		break; +	default: +		/* bypass */ +		break; +	} + +	return gsc_src_set_buf_seq(ctx, buf_id, buf_type); +} + +static struct exynos_drm_ipp_ops gsc_src_ops = { +	.set_fmt = gsc_src_set_fmt, +	.set_transf = gsc_src_set_transf, +	.set_size = gsc_src_set_size, +	.set_addr = gsc_src_set_addr, +}; + +static int gsc_dst_set_fmt(struct device *dev, u32 fmt) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); + +	cfg = gsc_read(GSC_OUT_CON); +	cfg &= 
~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK | +		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK | +		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK | +		 GSC_OUT_GLOBAL_ALPHA_MASK); + +	switch (fmt) { +	case DRM_FORMAT_RGB565: +		cfg |= GSC_OUT_RGB565; +		break; +	case DRM_FORMAT_XRGB8888: +		cfg |= GSC_OUT_XRGB8888; +		break; +	case DRM_FORMAT_BGRX8888: +		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP); +		break; +	case DRM_FORMAT_YUYV: +		cfg |= (GSC_OUT_YUV422_1P | +			GSC_OUT_YUV422_1P_ORDER_LSB_Y | +			GSC_OUT_CHROMA_ORDER_CBCR); +		break; +	case DRM_FORMAT_YVYU: +		cfg |= (GSC_OUT_YUV422_1P | +			GSC_OUT_YUV422_1P_ORDER_LSB_Y | +			GSC_OUT_CHROMA_ORDER_CRCB); +		break; +	case DRM_FORMAT_UYVY: +		cfg |= (GSC_OUT_YUV422_1P | +			GSC_OUT_YUV422_1P_OEDER_LSB_C | +			GSC_OUT_CHROMA_ORDER_CBCR); +		break; +	case DRM_FORMAT_VYUY: +		cfg |= (GSC_OUT_YUV422_1P | +			GSC_OUT_YUV422_1P_OEDER_LSB_C | +			GSC_OUT_CHROMA_ORDER_CRCB); +		break; +	case DRM_FORMAT_NV21: +	case DRM_FORMAT_NV61: +		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); +		break; +	case DRM_FORMAT_YUV422: +	case DRM_FORMAT_YUV420: +	case DRM_FORMAT_YVU420: +		cfg |= GSC_OUT_YUV420_3P; +		break; +	case DRM_FORMAT_NV12: +	case DRM_FORMAT_NV16: +		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | +			GSC_OUT_YUV420_2P); +		break; +	case DRM_FORMAT_NV12MT: +		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE); +		break; +	default: +		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); +		return -EINVAL; +	} + +	gsc_write(cfg, GSC_OUT_CON); + +	return 0; +} + +static int gsc_dst_set_transf(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; + +	DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + +	cfg = gsc_read(GSC_IN_CON); +	cfg &= ~GSC_IN_ROT_MASK; + +	switch (degree) { +	case EXYNOS_DRM_DEGREE_0: +		if (flip & 
EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= GSC_IN_ROT_XFLIP; +		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= GSC_IN_ROT_YFLIP; +		break; +	case EXYNOS_DRM_DEGREE_90: +		if (flip & EXYNOS_DRM_FLIP_VERTICAL) +			cfg |= GSC_IN_ROT_90_XFLIP; +		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) +			cfg |= GSC_IN_ROT_90_YFLIP; +		else +			cfg |= GSC_IN_ROT_90; +		break; +	case EXYNOS_DRM_DEGREE_180: +		cfg |= GSC_IN_ROT_180; +		break; +	case EXYNOS_DRM_DEGREE_270: +		cfg |= GSC_IN_ROT_270; +		break; +	default: +		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree); +		return -EINVAL; +	} + +	gsc_write(cfg, GSC_IN_CON); + +	ctx->rotation = cfg & +		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0; +	*swap = ctx->rotation; + +	return 0; +} + +static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio) +{ +	DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst); + +	if (src >= dst * 8) { +		DRM_ERROR("failed to make ratio and shift.\n"); +		return -EINVAL; +	} else if (src >= dst * 4) +		*ratio = 4; +	else if (src >= dst * 2) +		*ratio = 2; +	else +		*ratio = 1; + +	return 0; +} + +static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor) +{ +	if (hratio == 4 && vratio == 4) +		*shfactor = 4; +	else if ((hratio == 4 && vratio == 2) || +		 (hratio == 2 && vratio == 4)) +		*shfactor = 3; +	else if ((hratio == 4 && vratio == 1) || +		 (hratio == 1 && vratio == 4) || +		 (hratio == 2 && vratio == 2)) +		*shfactor = 2; +	else if (hratio == 1 && vratio == 1) +		*shfactor = 0; +	else +		*shfactor = 1; +} + +static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, +		struct drm_exynos_pos *src, struct drm_exynos_pos *dst) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	u32 cfg; +	u32 src_w, src_h, dst_w, dst_h; +	int ret = 0; + +	src_w = src->w; +	src_h = src->h; + +	if (ctx->rotation) { +		dst_w = dst->h; +		dst_h = dst->w; +	} else { +		dst_w = dst->w; +		dst_h = dst->h; +	} + +	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio); +	if (ret) { +	
	dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); +		return ret; +	} + +	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio); +	if (ret) { +		dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); +		return ret; +	} + +	DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n", +		sc->pre_hratio, sc->pre_vratio); + +	sc->main_hratio = (src_w << 16) / dst_w; +	sc->main_vratio = (src_h << 16) / dst_h; + +	DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n", +		sc->main_hratio, sc->main_vratio); + +	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio, +		&sc->pre_shfactor); + +	DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor); + +	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) | +		GSC_PRESC_H_RATIO(sc->pre_hratio) | +		GSC_PRESC_V_RATIO(sc->pre_vratio)); +	gsc_write(cfg, GSC_PRE_SCALE_RATIO); + +	return ret; +} + +static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio) +{ +	int i, j, k, sc_ratio; + +	if (main_hratio <= GSC_SC_UP_MAX_RATIO) +		sc_ratio = 0; +	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8) +		sc_ratio = 1; +	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8) +		sc_ratio = 2; +	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8) +		sc_ratio = 3; +	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8) +		sc_ratio = 4; +	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8) +		sc_ratio = 5; +	else +		sc_ratio = 6; + +	for (i = 0; i < GSC_COEF_PHASE; i++) +		for (j = 0; j < GSC_COEF_H_8T; j++) +			for (k = 0; k < GSC_COEF_DEPTH; k++) +				gsc_write(h_coef_8t[sc_ratio][i][j], +					GSC_HCOEF(i, j, k)); +} + +static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio) +{ +	int i, j, k, sc_ratio; + +	if (main_vratio <= GSC_SC_UP_MAX_RATIO) +		sc_ratio = 0; +	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8) +		sc_ratio = 1; +	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8) +		sc_ratio = 2; +	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8) +		sc_ratio = 3; +	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8) +		sc_ratio = 4; +	else if 
(main_vratio <= GSC_SC_DOWN_RATIO_3_8) +		sc_ratio = 5; +	else +		sc_ratio = 6; + +	for (i = 0; i < GSC_COEF_PHASE; i++) +		for (j = 0; j < GSC_COEF_V_4T; j++) +			for (k = 0; k < GSC_COEF_DEPTH; k++) +				gsc_write(v_coef_4t[sc_ratio][i][j], +					GSC_VCOEF(i, j, k)); +} + +static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc) +{ +	u32 cfg; + +	DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n", +		sc->main_hratio, sc->main_vratio); + +	gsc_set_h_coef(ctx, sc->main_hratio); +	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio); +	gsc_write(cfg, GSC_MAIN_H_RATIO); + +	gsc_set_v_coef(ctx, sc->main_vratio); +	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio); +	gsc_write(cfg, GSC_MAIN_V_RATIO); +} + +static int gsc_dst_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct drm_exynos_pos img_pos = *pos; +	struct gsc_scaler *sc = &ctx->sc; +	u32 cfg; + +	DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", +		swap, pos->x, pos->y, pos->w, pos->h); + +	if (swap) { +		img_pos.w = pos->h; +		img_pos.h = pos->w; +	} + +	/* pixel offset */ +	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) | +		GSC_DSTIMG_OFFSET_Y(pos->y)); +	gsc_write(cfg, GSC_DSTIMG_OFFSET); + +	/* scaled size */ +	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h)); +	gsc_write(cfg, GSC_SCALED_SIZE); + +	DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); + +	/* original size */ +	cfg = gsc_read(GSC_DSTIMG_SIZE); +	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | +		GSC_DSTIMG_WIDTH_MASK); +	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) | +		GSC_DSTIMG_HEIGHT(sz->vsize)); +	gsc_write(cfg, GSC_DSTIMG_SIZE); + +	cfg = gsc_read(GSC_OUT_CON); +	cfg &= ~GSC_OUT_RGB_TYPE_MASK; + +	DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); + +	if (pos->w >= GSC_WIDTH_ITU_709) +		if (sc->range) +			cfg |= GSC_OUT_RGB_HD_WIDE; +		else +			cfg |= GSC_OUT_RGB_HD_NARROW; +	else +		if (sc->range) +			cfg |= 
GSC_OUT_RGB_SD_WIDE; +		else +			cfg |= GSC_OUT_RGB_SD_NARROW; + +	gsc_write(cfg, GSC_OUT_CON); + +	return 0; +} + +static int gsc_dst_get_buf_seq(struct gsc_context *ctx) +{ +	u32 cfg, i, buf_num = GSC_REG_SZ; +	u32 mask = 0x00000001; + +	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); + +	for (i = 0; i < GSC_REG_SZ; i++) +		if (cfg & (mask << i)) +			buf_num--; + +	DRM_DEBUG_KMS("buf_num[%d]\n", buf_num); + +	return buf_num; +} + +static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, +		enum drm_exynos_ipp_buf_type buf_type) +{ +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	bool masked; +	u32 cfg; +	u32 mask = 0x00000001 << buf_id; +	int ret = 0; + +	DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); + +	mutex_lock(&ctx->lock); + +	/* mask register set */ +	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); + +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		masked = false; +		break; +	case IPP_BUF_DEQUEUE: +		masked = true; +		break; +	default: +		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); +		ret =  -EINVAL; +		goto err_unlock; +	} + +	/* sequence id */ +	cfg &= ~mask; +	cfg |= masked << buf_id; +	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK); +	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK); +	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK); + +	/* interrupt enable */ +	if (buf_type == IPP_BUF_ENQUEUE && +	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) +		gsc_handle_irq(ctx, true, false, true); + +	/* interrupt disable */ +	if (buf_type == IPP_BUF_DEQUEUE && +	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) +		gsc_handle_irq(ctx, false, false, true); + +err_unlock: +	mutex_unlock(&ctx->lock); +	return ret; +} + +static int gsc_dst_set_addr(struct device *dev, +		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, +		enum drm_exynos_ipp_buf_type buf_type) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_property 
*property; + +	if (!c_node) { +		DRM_ERROR("failed to get c_node.\n"); +		return -EFAULT; +	} + +	property = &c_node->property; + +	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", +		property->prop_id, buf_id, buf_type); + +	if (buf_id > GSC_MAX_DST) { +		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id); +		return -EINVAL; +	} + +	/* address register set */ +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], +			GSC_OUT_BASE_ADDR_Y(buf_id)); +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], +			GSC_OUT_BASE_ADDR_CB(buf_id)); +		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], +			GSC_OUT_BASE_ADDR_CR(buf_id)); +		break; +	case IPP_BUF_DEQUEUE: +		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id)); +		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id)); +		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id)); +		break; +	default: +		/* bypass */ +		break; +	} + +	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type); +} + +static struct exynos_drm_ipp_ops gsc_dst_ops = { +	.set_fmt = gsc_dst_set_fmt, +	.set_transf = gsc_dst_set_transf, +	.set_size = gsc_dst_set_size, +	.set_addr = gsc_dst_set_addr, +}; + +static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) +{ +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	if (enable) { +		clk_enable(ctx->gsc_clk); +		ctx->suspended = false; +	} else { +		clk_disable(ctx->gsc_clk); +		ctx->suspended = true; +	} + +	return 0; +} + +static int gsc_get_src_buf_index(struct gsc_context *ctx) +{ +	u32 cfg, curr_index, i; +	u32 buf_id = GSC_MAX_SRC; +	int ret; + +	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); + +	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); +	curr_index = GSC_IN_CURR_GET_INDEX(cfg); + +	for (i = curr_index; i < GSC_MAX_SRC; i++) { +		if (!((cfg >> i) & 0x1)) { +			buf_id = i; +			break; +		} +	} + +	if (buf_id == GSC_MAX_SRC) { +		DRM_ERROR("failed to get in buffer index.\n"); +		return -EINVAL; +	} + +	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); +	if (ret < 0) { +		DRM_ERROR("failed 
to dequeue.\n"); +		return ret; +	} + +	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, +		curr_index, buf_id); + +	return buf_id; +} + +static int gsc_get_dst_buf_index(struct gsc_context *ctx) +{ +	u32 cfg, curr_index, i; +	u32 buf_id = GSC_MAX_DST; +	int ret; + +	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); + +	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); +	curr_index = GSC_OUT_CURR_GET_INDEX(cfg); + +	for (i = curr_index; i < GSC_MAX_DST; i++) { +		if (!((cfg >> i) & 0x1)) { +			buf_id = i; +			break; +		} +	} + +	if (buf_id == GSC_MAX_DST) { +		DRM_ERROR("failed to get out buffer index.\n"); +		return -EINVAL; +	} + +	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); +	if (ret < 0) { +		DRM_ERROR("failed to dequeue.\n"); +		return ret; +	} + +	DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, +		curr_index, buf_id); + +	return buf_id; +} + +static irqreturn_t gsc_irq_handler(int irq, void *dev_id) +{ +	struct gsc_context *ctx = dev_id; +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_event_work *event_work = +		c_node->event_work; +	u32 status; +	int buf_id[EXYNOS_DRM_OPS_MAX]; + +	DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); + +	status = gsc_read(GSC_IRQ); +	if (status & GSC_IRQ_STATUS_OR_IRQ) { +		dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", +			ctx->id, status); +		return IRQ_NONE; +	} + +	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) { +		dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n", +			ctx->id, status); + +		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx); +		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0) +			return IRQ_HANDLED; + +		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx); +		if (buf_id[EXYNOS_DRM_OPS_DST] < 0) +			return IRQ_HANDLED; + +		DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", +			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]); + +		event_work->ippdrv = ippdrv; +		
event_work->buf_id[EXYNOS_DRM_OPS_SRC] = +			buf_id[EXYNOS_DRM_OPS_SRC]; +		event_work->buf_id[EXYNOS_DRM_OPS_DST] = +			buf_id[EXYNOS_DRM_OPS_DST]; +		queue_work(ippdrv->event_workq, +			(struct work_struct *)event_work); +	} + +	return IRQ_HANDLED; +} + +static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) +{ +	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; + +	prop_list->version = 1; +	prop_list->writeback = 1; +	prop_list->refresh_min = GSC_REFRESH_MIN; +	prop_list->refresh_max = GSC_REFRESH_MAX; +	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | +				(1 << EXYNOS_DRM_FLIP_HORIZONTAL); +	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | +				(1 << EXYNOS_DRM_DEGREE_90) | +				(1 << EXYNOS_DRM_DEGREE_180) | +				(1 << EXYNOS_DRM_DEGREE_270); +	prop_list->csc = 1; +	prop_list->crop = 1; +	prop_list->crop_max.hsize = GSC_CROP_MAX; +	prop_list->crop_max.vsize = GSC_CROP_MAX; +	prop_list->crop_min.hsize = GSC_CROP_MIN; +	prop_list->crop_min.vsize = GSC_CROP_MIN; +	prop_list->scale = 1; +	prop_list->scale_max.hsize = GSC_SCALE_MAX; +	prop_list->scale_max.vsize = GSC_SCALE_MAX; +	prop_list->scale_min.hsize = GSC_SCALE_MIN; +	prop_list->scale_min.vsize = GSC_SCALE_MIN; + +	return 0; +} + +static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip) +{ +	switch (flip) { +	case EXYNOS_DRM_FLIP_NONE: +	case EXYNOS_DRM_FLIP_VERTICAL: +	case EXYNOS_DRM_FLIP_HORIZONTAL: +	case EXYNOS_DRM_FLIP_BOTH: +		return true; +	default: +		DRM_DEBUG_KMS("invalid flip\n"); +		return false; +	} +} + +static int gsc_ippdrv_check_property(struct device *dev, +		struct drm_exynos_ipp_property *property) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list; +	struct drm_exynos_ipp_config *config; +	struct drm_exynos_pos *pos; +	struct drm_exynos_sz *sz; +	bool swap; +	int i; + +	for_each_ipp_ops(i) { +		if ((i == EXYNOS_DRM_OPS_SRC) && +			
(property->cmd == IPP_CMD_WB)) +			continue; + +		config = &property->config[i]; +		pos = &config->pos; +		sz = &config->sz; + +		/* check for flip */ +		if (!gsc_check_drm_flip(config->flip)) { +			DRM_ERROR("invalid flip.\n"); +			goto err_property; +		} + +		/* check for degree */ +		switch (config->degree) { +		case EXYNOS_DRM_DEGREE_90: +		case EXYNOS_DRM_DEGREE_270: +			swap = true; +			break; +		case EXYNOS_DRM_DEGREE_0: +		case EXYNOS_DRM_DEGREE_180: +			swap = false; +			break; +		default: +			DRM_ERROR("invalid degree.\n"); +			goto err_property; +		} + +		/* check for buffer bound */ +		if ((pos->x + pos->w > sz->hsize) || +			(pos->y + pos->h > sz->vsize)) { +			DRM_ERROR("out of buf bound.\n"); +			goto err_property; +		} + +		/* check for crop */ +		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { +			if (swap) { +				if ((pos->h < pp->crop_min.hsize) || +					(sz->vsize > pp->crop_max.hsize) || +					(pos->w < pp->crop_min.vsize) || +					(sz->hsize > pp->crop_max.vsize)) { +					DRM_ERROR("out of crop size.\n"); +					goto err_property; +				} +			} else { +				if ((pos->w < pp->crop_min.hsize) || +					(sz->hsize > pp->crop_max.hsize) || +					(pos->h < pp->crop_min.vsize) || +					(sz->vsize > pp->crop_max.vsize)) { +					DRM_ERROR("out of crop size.\n"); +					goto err_property; +				} +			} +		} + +		/* check for scale */ +		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { +			if (swap) { +				if ((pos->h < pp->scale_min.hsize) || +					(sz->vsize > pp->scale_max.hsize) || +					(pos->w < pp->scale_min.vsize) || +					(sz->hsize > pp->scale_max.vsize)) { +					DRM_ERROR("out of scale size.\n"); +					goto err_property; +				} +			} else { +				if ((pos->w < pp->scale_min.hsize) || +					(sz->hsize > pp->scale_max.hsize) || +					(pos->h < pp->scale_min.vsize) || +					(sz->vsize > pp->scale_max.vsize)) { +					DRM_ERROR("out of scale size.\n"); +					goto err_property; +				} +			} +		} +	} + +	return 0; + +err_property: +	for_each_ipp_ops(i) { +		
if ((i == EXYNOS_DRM_OPS_SRC) && +			(property->cmd == IPP_CMD_WB)) +			continue; + +		config = &property->config[i]; +		pos = &config->pos; +		sz = &config->sz; + +		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", +			i ? "dst" : "src", config->flip, config->degree, +			pos->x, pos->y, pos->w, pos->h, +			sz->hsize, sz->vsize); +	} + +	return -EINVAL; +} + + +static int gsc_ippdrv_reset(struct device *dev) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct gsc_scaler *sc = &ctx->sc; +	int ret; + +	/* reset h/w block */ +	ret = gsc_sw_reset(ctx); +	if (ret < 0) { +		dev_err(dev, "failed to reset hardware.\n"); +		return ret; +	} + +	/* scaler setting */ +	memset(&ctx->sc, 0x0, sizeof(ctx->sc)); +	sc->range = true; + +	return 0; +} + +static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_property *property; +	struct drm_exynos_ipp_config *config; +	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX]; +	struct drm_exynos_ipp_set_wb set_wb; +	u32 cfg; +	int ret, i; + +	DRM_DEBUG_KMS("cmd[%d]\n", cmd); + +	if (!c_node) { +		DRM_ERROR("failed to get c_node.\n"); +		return -EINVAL; +	} + +	property = &c_node->property; + +	gsc_handle_irq(ctx, true, false, true); + +	for_each_ipp_ops(i) { +		config = &property->config[i]; +		img_pos[i] = config->pos; +	} + +	switch (cmd) { +	case IPP_CMD_M2M: +		/* enable one shot */ +		cfg = gsc_read(GSC_ENABLE); +		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | +			GSC_ENABLE_CLK_GATE_MODE_MASK); +		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT; +		gsc_write(cfg, GSC_ENABLE); + +		/* src dma memory */ +		cfg = gsc_read(GSC_IN_CON); +		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); +		cfg |= GSC_IN_PATH_MEMORY; +		gsc_write(cfg, GSC_IN_CON); + +		/* dst dma memory */ +		cfg = gsc_read(GSC_OUT_CON); +		cfg |= GSC_OUT_PATH_MEMORY; +		
gsc_write(cfg, GSC_OUT_CON); +		break; +	case IPP_CMD_WB: +		set_wb.enable = 1; +		set_wb.refresh = property->refresh_rate; +		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); +		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); + +		/* src local path */ +		cfg = gsc_read(GSC_IN_CON); +		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); +		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB); +		gsc_write(cfg, GSC_IN_CON); + +		/* dst dma memory */ +		cfg = gsc_read(GSC_OUT_CON); +		cfg |= GSC_OUT_PATH_MEMORY; +		gsc_write(cfg, GSC_OUT_CON); +		break; +	case IPP_CMD_OUTPUT: +		/* src dma memory */ +		cfg = gsc_read(GSC_IN_CON); +		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); +		cfg |= GSC_IN_PATH_MEMORY; +		gsc_write(cfg, GSC_IN_CON); + +		/* dst local path */ +		cfg = gsc_read(GSC_OUT_CON); +		cfg |= GSC_OUT_PATH_MEMORY; +		gsc_write(cfg, GSC_OUT_CON); +		break; +	default: +		ret = -EINVAL; +		dev_err(dev, "invalid operations.\n"); +		return ret; +	} + +	ret = gsc_set_prescaler(ctx, &ctx->sc, +		&img_pos[EXYNOS_DRM_OPS_SRC], +		&img_pos[EXYNOS_DRM_OPS_DST]); +	if (ret) { +		dev_err(dev, "failed to set precalser.\n"); +		return ret; +	} + +	gsc_set_scaler(ctx, &ctx->sc); + +	cfg = gsc_read(GSC_ENABLE); +	cfg |= GSC_ENABLE_ON; +	gsc_write(cfg, GSC_ENABLE); + +	return 0; +} + +static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) +{ +	struct gsc_context *ctx = get_gsc_context(dev); +	struct drm_exynos_ipp_set_wb set_wb = {0, 0}; +	u32 cfg; + +	DRM_DEBUG_KMS("cmd[%d]\n", cmd); + +	switch (cmd) { +	case IPP_CMD_M2M: +		/* bypass */ +		break; +	case IPP_CMD_WB: +		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); +		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); +		break; +	case IPP_CMD_OUTPUT: +	default: +		dev_err(dev, "invalid operations.\n"); +		break; +	} + +	gsc_handle_irq(ctx, false, false, true); + +	/* reset sequence */ +	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK); +	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK); +	
gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK); + +	cfg = gsc_read(GSC_ENABLE); +	cfg &= ~GSC_ENABLE_ON; +	gsc_write(cfg, GSC_ENABLE); +} + +static int gsc_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct gsc_context *ctx; +	struct resource *res; +	struct exynos_drm_ippdrv *ippdrv; +	int ret; + +	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) +		return -ENOMEM; + +	/* clock control */ +	ctx->gsc_clk = devm_clk_get(dev, "gscl"); +	if (IS_ERR(ctx->gsc_clk)) { +		dev_err(dev, "failed to get gsc clock.\n"); +		return PTR_ERR(ctx->gsc_clk); +	} + +	/* resource memory */ +	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); +	if (IS_ERR(ctx->regs)) +		return PTR_ERR(ctx->regs); + +	/* resource irq */ +	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); +	if (!res) { +		dev_err(dev, "failed to request irq resource.\n"); +		return -ENOENT; +	} + +	ctx->irq = res->start; +	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, +		IRQF_ONESHOT, "drm_gsc", ctx); +	if (ret < 0) { +		dev_err(dev, "failed to request irq.\n"); +		return ret; +	} + +	/* context initailization */ +	ctx->id = pdev->id; + +	ippdrv = &ctx->ippdrv; +	ippdrv->dev = dev; +	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops; +	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops; +	ippdrv->check_property = gsc_ippdrv_check_property; +	ippdrv->reset = gsc_ippdrv_reset; +	ippdrv->start = gsc_ippdrv_start; +	ippdrv->stop = gsc_ippdrv_stop; +	ret = gsc_init_prop_list(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to init property list.\n"); +		return ret; +	} + +	DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); + +	mutex_init(&ctx->lock); +	platform_set_drvdata(pdev, ctx); + +	pm_runtime_set_active(dev); +	pm_runtime_enable(dev); + +	ret = exynos_drm_ippdrv_register(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to register drm gsc device.\n"); +		goto 
err_ippdrv_register; +	} + +	dev_info(dev, "drm gsc registered successfully.\n"); + +	return 0; + +err_ippdrv_register: +	pm_runtime_disable(dev); +	return ret; +} + +static int gsc_remove(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct gsc_context *ctx = get_gsc_context(dev); +	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + +	exynos_drm_ippdrv_unregister(ippdrv); +	mutex_destroy(&ctx->lock); + +	pm_runtime_set_suspended(dev); +	pm_runtime_disable(dev); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int gsc_suspend(struct device *dev) +{ +	struct gsc_context *ctx = get_gsc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	return gsc_clk_ctrl(ctx, false); +} + +static int gsc_resume(struct device *dev) +{ +	struct gsc_context *ctx = get_gsc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	if (!pm_runtime_suspended(dev)) +		return gsc_clk_ctrl(ctx, true); + +	return 0; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int gsc_runtime_suspend(struct device *dev) +{ +	struct gsc_context *ctx = get_gsc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	return  gsc_clk_ctrl(ctx, false); +} + +static int gsc_runtime_resume(struct device *dev) +{ +	struct gsc_context *ctx = get_gsc_context(dev); + +	DRM_DEBUG_KMS("id[%d]\n", ctx->id); + +	return  gsc_clk_ctrl(ctx, true); +} +#endif + +static const struct dev_pm_ops gsc_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume) +	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) +}; + +struct platform_driver gsc_driver = { +	.probe		= gsc_probe, +	.remove		= gsc_remove, +	.driver		= { +		.name	= "exynos-drm-gsc", +		.owner	= THIS_MODULE, +		.pm	= &gsc_pm_ops, +	}, +}; + diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h new file mode 100644 index 00000000000..29ec1c5efcf --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h @@ -0,0 +1,24 @@ +/* + * 
Copyright (c) 2012 Samsung Electronics Co., Ltd. + * + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee <lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_GSC_H_ +#define _EXYNOS_DRM_GSC_H_ + +/* + * TODO + * FIMD output interface notifier callback. + * Mixer output interface notifier callback. + */ + +#endif /* _EXYNOS_DRM_GSC_H_ */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c new file mode 100644 index 00000000000..b32b291f88f --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c @@ -0,0 +1,143 @@ +/* exynos_drm_iommu.c + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#include <drmP.h> +#include <drm/exynos_drm.h> + +#include <linux/dma-mapping.h> +#include <linux/iommu.h> +#include <linux/kref.h> + +#include <asm/dma-iommu.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_iommu.h" + +/* + * drm_create_iommu_mapping - create a mapping structure + * + * @drm_dev: DRM device + */ +int drm_create_iommu_mapping(struct drm_device *drm_dev) +{ +	struct dma_iommu_mapping *mapping = NULL; +	struct exynos_drm_private *priv = drm_dev->dev_private; +	struct device *dev = drm_dev->dev; + +	if (!priv->da_start) +		priv->da_start = EXYNOS_DEV_ADDR_START; +	if (!priv->da_space_size) +		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE; + +	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, +						priv->da_space_size); + +	if (IS_ERR(mapping)) +		return PTR_ERR(mapping); + +	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), +					GFP_KERNEL); +	if (!dev->dma_parms) +		goto error; + +	dma_set_max_seg_size(dev, 0xffffffffu); +	dev->archdata.mapping = mapping; + +	return 0; +error: +	arm_iommu_release_mapping(mapping); +	return -ENOMEM; +} + +/* + * drm_release_iommu_mapping - release iommu mapping structure + * + * @drm_dev: DRM device + * + * if mapping->kref becomes 0 then all things related to iommu mapping + * will be released + */ +void drm_release_iommu_mapping(struct drm_device *drm_dev) +{ +	struct device *dev = drm_dev->dev; + +	arm_iommu_release_mapping(dev->archdata.mapping); +} + +/* + * drm_iommu_attach_device- attach device to iommu mapping + * + * @drm_dev: DRM device + * @subdrv_dev: device to be attach + * + * This function should be called by sub drivers to attach it to iommu + * mapping. 
+ */ +int drm_iommu_attach_device(struct drm_device *drm_dev, +				struct device *subdrv_dev) +{ +	struct device *dev = drm_dev->dev; +	int ret; + +	if (!dev->archdata.mapping) { +		DRM_ERROR("iommu_mapping is null.\n"); +		return -EFAULT; +	} + +	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, +					sizeof(*subdrv_dev->dma_parms), +					GFP_KERNEL); +	if (!subdrv_dev->dma_parms) +		return -ENOMEM; + +	dma_set_max_seg_size(subdrv_dev, 0xffffffffu); + +	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); +	if (ret < 0) { +		DRM_DEBUG_KMS("failed iommu attach.\n"); +		return ret; +	} + +	/* +	 * Set dma_ops to drm_device just one time. +	 * +	 * The dma mapping api needs device object and the api is used +	 * to allocate physial memory and map it with iommu table. +	 * If iommu attach succeeded, the sub driver would have dma_ops +	 * for iommu and also all sub drivers have same dma_ops. +	 */ +	if (!dev->archdata.dma_ops) +		dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops; + +	return 0; +} + +/* + * drm_iommu_detach_device -detach device address space mapping from device + * + * @drm_dev: DRM device + * @subdrv_dev: device to be detached + * + * This function should be called by sub drivers to detach it from iommu + * mapping + */ +void drm_iommu_detach_device(struct drm_device *drm_dev, +				struct device *subdrv_dev) +{ +	struct device *dev = drm_dev->dev; +	struct dma_iommu_mapping *mapping = dev->archdata.mapping; + +	if (!mapping || !mapping->domain) +		return; + +	iommu_detach_device(mapping->domain, subdrv_dev); +	drm_release_iommu_mapping(drm_dev); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h new file mode 100644 index 00000000000..72376d41c51 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h @@ -0,0 +1,70 @@ +/* exynos_drm_iommu.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
+ * Authoer: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_IOMMU_H_ +#define _EXYNOS_DRM_IOMMU_H_ + +#define EXYNOS_DEV_ADDR_START	0x20000000 +#define EXYNOS_DEV_ADDR_SIZE	0x40000000 + +#ifdef CONFIG_DRM_EXYNOS_IOMMU + +int drm_create_iommu_mapping(struct drm_device *drm_dev); + +void drm_release_iommu_mapping(struct drm_device *drm_dev); + +int drm_iommu_attach_device(struct drm_device *drm_dev, +				struct device *subdrv_dev); + +void drm_iommu_detach_device(struct drm_device *dev_dev, +				struct device *subdrv_dev); + +static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) +{ +#ifdef CONFIG_ARM_DMA_USE_IOMMU +	struct device *dev = drm_dev->dev; + +	return dev->archdata.mapping ? true : false; +#else +	return false; +#endif +} + +#else + +struct dma_iommu_mapping; +static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) +{ +	return 0; +} + +static inline void drm_release_iommu_mapping(struct drm_device *drm_dev) +{ +} + +static inline int drm_iommu_attach_device(struct drm_device *drm_dev, +						struct device *subdrv_dev) +{ +	return 0; +} + +static inline void drm_iommu_detach_device(struct drm_device *drm_dev, +						struct device *subdrv_dev) +{ +} + +static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) +{ +	return false; +} + +#endif +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c new file mode 100644 index 00000000000..a1888e128f1 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -0,0 +1,1989 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee 
<lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_ipp.h" +#include "exynos_drm_iommu.h" + +/* + * IPP stands for Image Post Processing and + * supports image scaler/rotator and input/output DMA operations. + * using FIMC, GSC, Rotator, so on. + * IPP is integration device driver of same attribute h/w + */ + +/* + * TODO + * 1. expand command control id. + * 2. integrate	property and config. + * 3. removed send_event id check routine. + * 4. compare send_event id if needed. + * 5. free subdrv_remove notifier callback list if needed. + * 6. need to check subdrv_open about multi-open. + * 7. need to power_on implement power and sysmmu ctrl. + */ + +#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev)) +#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M) + +/* platform device pointer for ipp device. */ +static struct platform_device *exynos_drm_ipp_pdev; + +/* + * A structure of event. + * + * @base: base of event. + * @event: ipp event. + */ +struct drm_exynos_ipp_send_event { +	struct drm_pending_event	base; +	struct drm_exynos_ipp_event	event; +}; + +/* + * A structure of memory node. + * + * @list: list head to memory queue information. + * @ops_id: id of operations. + * @prop_id: id of property. + * @buf_id: id of buffer. + * @buf_info: gem objects and dma address, size. + * @filp: a pointer to drm_file. 
+ */ +struct drm_exynos_ipp_mem_node { +	struct list_head	list; +	enum drm_exynos_ops_id	ops_id; +	u32	prop_id; +	u32	buf_id; +	struct drm_exynos_ipp_buf_info	buf_info; +	struct drm_file		*filp; +}; + +/* + * A structure of ipp context. + * + * @subdrv: prepare initialization using subdrv. + * @ipp_lock: lock for synchronization of access to ipp_idr. + * @prop_lock: lock for synchronization of access to prop_idr. + * @ipp_idr: ipp driver idr. + * @prop_idr: property idr. + * @event_workq: event work queue. + * @cmd_workq: command work queue. + */ +struct ipp_context { +	struct exynos_drm_subdrv	subdrv; +	struct mutex	ipp_lock; +	struct mutex	prop_lock; +	struct idr	ipp_idr; +	struct idr	prop_idr; +	struct workqueue_struct	*event_workq; +	struct workqueue_struct	*cmd_workq; +}; + +static LIST_HEAD(exynos_drm_ippdrv_list); +static DEFINE_MUTEX(exynos_drm_ippdrv_lock); +static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); + +int exynos_platform_device_ipp_register(void) +{ +	struct platform_device *pdev; + +	if (exynos_drm_ipp_pdev) +		return -EEXIST; + +	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0); +	if (IS_ERR(pdev)) +		return PTR_ERR(pdev); + +	exynos_drm_ipp_pdev = pdev; + +	return 0; +} + +void exynos_platform_device_ipp_unregister(void) +{ +	if (exynos_drm_ipp_pdev) { +		platform_device_unregister(exynos_drm_ipp_pdev); +		exynos_drm_ipp_pdev = NULL; +	} +} + +int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) +{ +	if (!ippdrv) +		return -EINVAL; + +	mutex_lock(&exynos_drm_ippdrv_lock); +	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); +	mutex_unlock(&exynos_drm_ippdrv_lock); + +	return 0; +} + +int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) +{ +	if (!ippdrv) +		return -EINVAL; + +	mutex_lock(&exynos_drm_ippdrv_lock); +	list_del(&ippdrv->drv_list); +	mutex_unlock(&exynos_drm_ippdrv_lock); + +	return 0; +} + +static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj, +		u32 
*idp) +{ +	int ret; + +	/* do the allocation under our mutexlock */ +	mutex_lock(lock); +	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); +	mutex_unlock(lock); +	if (ret < 0) +		return ret; + +	*idp = ret; +	return 0; +} + +static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) +{ +	mutex_lock(lock); +	idr_remove(id_idr, id); +	mutex_unlock(lock); +} + +static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) +{ +	void *obj; + +	DRM_DEBUG_KMS("id[%d]\n", id); + +	mutex_lock(lock); + +	/* find object using handle */ +	obj = idr_find(id_idr, id); +	if (!obj) { +		DRM_ERROR("failed to find object.\n"); +		mutex_unlock(lock); +		return ERR_PTR(-ENODEV); +	} + +	mutex_unlock(lock); + +	return obj; +} + +static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv, +		enum drm_exynos_ipp_cmd	cmd) +{ +	/* +	 * check dedicated flag and WB, OUTPUT operation with +	 * power on state. +	 */ +	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) && +	    !pm_runtime_suspended(ippdrv->dev))) +		return true; + +	return false; +} + +static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, +		struct drm_exynos_ipp_property *property) +{ +	struct exynos_drm_ippdrv *ippdrv; +	u32 ipp_id = property->ipp_id; + +	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id); + +	if (ipp_id) { +		/* find ipp driver using idr */ +		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, +			ipp_id); +		if (IS_ERR(ippdrv)) { +			DRM_ERROR("not found ipp%d driver.\n", ipp_id); +			return ippdrv; +		} + +		/* +		 * WB, OUTPUT opertion not supported multi-operation. +		 * so, make dedicated state at set property ioctl. +		 * when ipp driver finished operations, clear dedicated flags. +		 */ +		if (ipp_check_dedicated(ippdrv, property->cmd)) { +			DRM_ERROR("already used choose device.\n"); +			return ERR_PTR(-EBUSY); +		} + +		/* +		 * This is necessary to find correct device in ipp drivers. 
+		 * ipp drivers have different abilities, +		 * so need to check property. +		 */ +		if (ippdrv->check_property && +		    ippdrv->check_property(ippdrv->dev, property)) { +			DRM_ERROR("not support property.\n"); +			return ERR_PTR(-EINVAL); +		} + +		return ippdrv; +	} else { +		/* +		 * This case is search all ipp driver for finding. +		 * user application don't set ipp_id in this case, +		 * so ipp subsystem search correct driver in driver list. +		 */ +		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { +			if (ipp_check_dedicated(ippdrv, property->cmd)) { +				DRM_DEBUG_KMS("used device.\n"); +				continue; +			} + +			if (ippdrv->check_property && +			    ippdrv->check_property(ippdrv->dev, property)) { +				DRM_DEBUG_KMS("not support property.\n"); +				continue; +			} + +			return ippdrv; +		} + +		DRM_ERROR("not support ipp driver operations.\n"); +	} + +	return ERR_PTR(-ENODEV); +} + +static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) +{ +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	int count = 0; + +	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); + +	/* +	 * This case is search ipp driver by prop_id handle. +	 * sometimes, ipp subsystem find driver by prop_id. +	 * e.g PAUSE state, queue buf, command control. 
+	 */ +	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { +		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); + +		mutex_lock(&ippdrv->cmd_lock); +		list_for_each_entry(c_node, &ippdrv->cmd_list, list) { +			if (c_node->property.prop_id == prop_id) { +				mutex_unlock(&ippdrv->cmd_lock); +				return ippdrv; +			} +		} +		mutex_unlock(&ippdrv->cmd_lock); +	} + +	return ERR_PTR(-ENODEV); +} + +int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, +		struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; +	struct device *dev = priv->dev; +	struct ipp_context *ctx = get_ipp_context(dev); +	struct drm_exynos_ipp_prop_list *prop_list = data; +	struct exynos_drm_ippdrv *ippdrv; +	int count = 0; + +	if (!ctx) { +		DRM_ERROR("invalid context.\n"); +		return -EINVAL; +	} + +	if (!prop_list) { +		DRM_ERROR("invalid property parameter.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id); + +	if (!prop_list->ipp_id) { +		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) +			count++; + +		/* +		 * Supports ippdrv list count for user application. +		 * First step user application getting ippdrv count. +		 * and second step getting ippdrv capability using ipp_id. +		 */ +		prop_list->count = count; +	} else { +		/* +		 * Getting ippdrv capability by ipp_id. +		 * some device not supported wb, output interface. +		 * so, user application detect correct ipp driver +		 * using this ioctl. 
+		 */ +		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, +						prop_list->ipp_id); +		if (IS_ERR(ippdrv)) { +			DRM_ERROR("not found ipp%d driver.\n", +					prop_list->ipp_id); +			return PTR_ERR(ippdrv); +		} + +		*prop_list = ippdrv->prop_list; +	} + +	return 0; +} + +static void ipp_print_property(struct drm_exynos_ipp_property *property, +		int idx) +{ +	struct drm_exynos_ipp_config *config = &property->config[idx]; +	struct drm_exynos_pos *pos = &config->pos; +	struct drm_exynos_sz *sz = &config->sz; + +	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n", +		property->prop_id, idx ? "dst" : "src", config->fmt); + +	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n", +		pos->x, pos->y, pos->w, pos->h, +		sz->hsize, sz->vsize, config->flip, config->degree); +} + +static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property) +{ +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	u32 prop_id = property->prop_id; + +	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id); + +	ippdrv = ipp_find_drv_by_handle(prop_id); +	if (IS_ERR(ippdrv)) { +		DRM_ERROR("failed to get ipp driver.\n"); +		return -EINVAL; +	} + +	/* +	 * Find command node using command list in ippdrv. +	 * when we find this command no using prop_id. +	 * return property information set in this command node. 
+	 */ +	mutex_lock(&ippdrv->cmd_lock); +	list_for_each_entry(c_node, &ippdrv->cmd_list, list) { +		if ((c_node->property.prop_id == prop_id) && +		    (c_node->state == IPP_STATE_STOP)) { +			mutex_unlock(&ippdrv->cmd_lock); +			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n", +				property->cmd, (int)ippdrv); + +			c_node->property = *property; +			return 0; +		} +	} +	mutex_unlock(&ippdrv->cmd_lock); + +	DRM_ERROR("failed to search property.\n"); + +	return -EINVAL; +} + +static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void) +{ +	struct drm_exynos_ipp_cmd_work *cmd_work; + +	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL); +	if (!cmd_work) +		return ERR_PTR(-ENOMEM); + +	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd); + +	return cmd_work; +} + +static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) +{ +	struct drm_exynos_ipp_event_work *event_work; + +	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL); +	if (!event_work) +		return ERR_PTR(-ENOMEM); + +	INIT_WORK((struct work_struct *)event_work, ipp_sched_event); + +	return event_work; +} + +int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, +		struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; +	struct device *dev = priv->dev; +	struct ipp_context *ctx = get_ipp_context(dev); +	struct drm_exynos_ipp_property *property = data; +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	int ret, i; + +	if (!ctx) { +		DRM_ERROR("invalid context.\n"); +		return -EINVAL; +	} + +	if (!property) { +		DRM_ERROR("invalid property parameter.\n"); +		return -EINVAL; +	} + +	/* +	 * This is log print for user application property. +	 * user application set various property. +	 */ +	for_each_ipp_ops(i) +		ipp_print_property(property, i); + +	/* +	 * set property ioctl generated new prop_id. 
+	 * but in this case already asigned prop_id using old set property. +	 * e.g PAUSE state. this case supports find current prop_id and use it +	 * instead of allocation. +	 */ +	if (property->prop_id) { +		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); +		return ipp_find_and_set_property(property); +	} + +	/* find ipp driver using ipp id */ +	ippdrv = ipp_find_driver(ctx, property); +	if (IS_ERR(ippdrv)) { +		DRM_ERROR("failed to get ipp driver.\n"); +		return -EINVAL; +	} + +	/* allocate command node */ +	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL); +	if (!c_node) +		return -ENOMEM; + +	/* create property id */ +	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, +		&property->prop_id); +	if (ret) { +		DRM_ERROR("failed to create id.\n"); +		goto err_clear; +	} + +	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", +		property->prop_id, property->cmd, (int)ippdrv); + +	/* stored property information and ippdrv in private data */ +	c_node->priv = priv; +	c_node->property = *property; +	c_node->state = IPP_STATE_IDLE; + +	c_node->start_work = ipp_create_cmd_work(); +	if (IS_ERR(c_node->start_work)) { +		DRM_ERROR("failed to create start work.\n"); +		goto err_remove_id; +	} + +	c_node->stop_work = ipp_create_cmd_work(); +	if (IS_ERR(c_node->stop_work)) { +		DRM_ERROR("failed to create stop work.\n"); +		goto err_free_start; +	} + +	c_node->event_work = ipp_create_event_work(); +	if (IS_ERR(c_node->event_work)) { +		DRM_ERROR("failed to create event work.\n"); +		goto err_free_stop; +	} + +	mutex_init(&c_node->lock); +	mutex_init(&c_node->mem_lock); +	mutex_init(&c_node->event_lock); + +	init_completion(&c_node->start_complete); +	init_completion(&c_node->stop_complete); + +	for_each_ipp_ops(i) +		INIT_LIST_HEAD(&c_node->mem_list[i]); + +	INIT_LIST_HEAD(&c_node->event_list); +	list_splice_init(&priv->event_list, &c_node->event_list); +	mutex_lock(&ippdrv->cmd_lock); +	list_add_tail(&c_node->list, &ippdrv->cmd_list); +	
mutex_unlock(&ippdrv->cmd_lock); + +	/* make dedicated state without m2m */ +	if (!ipp_is_m2m_cmd(property->cmd)) +		ippdrv->dedicated = true; + +	return 0; + +err_free_stop: +	kfree(c_node->stop_work); +err_free_start: +	kfree(c_node->start_work); +err_remove_id: +	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id); +err_clear: +	kfree(c_node); +	return ret; +} + +static void ipp_clean_cmd_node(struct ipp_context *ctx, +				struct drm_exynos_ipp_cmd_node *c_node) +{ +	/* delete list */ +	list_del(&c_node->list); + +	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, +			c_node->property.prop_id); + +	/* destroy mutex */ +	mutex_destroy(&c_node->lock); +	mutex_destroy(&c_node->mem_lock); +	mutex_destroy(&c_node->event_lock); + +	/* free command node */ +	kfree(c_node->start_work); +	kfree(c_node->stop_work); +	kfree(c_node->event_work); +	kfree(c_node); +} + +static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) +{ +	struct drm_exynos_ipp_property *property = &c_node->property; +	struct drm_exynos_ipp_mem_node *m_node; +	struct list_head *head; +	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; + +	for_each_ipp_ops(i) { +		/* source/destination memory list */ +		head = &c_node->mem_list[i]; + +		/* find memory node entry */ +		list_for_each_entry(m_node, head, list) { +			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n", +				i ? "dst" : "src", count[i], (int)m_node); +			count[i]++; +		} +	} + +	DRM_DEBUG_KMS("min[%d]max[%d]\n", +		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), +		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); + +	/* +	 * M2M operations should be need paired memory address. +	 * so, need to check minimum count about src, dst. 
+	 * other case not use paired memory, so use maximum count +	 */ +	if (ipp_is_m2m_cmd(property->cmd)) +		ret = min(count[EXYNOS_DRM_OPS_SRC], +			count[EXYNOS_DRM_OPS_DST]); +	else +		ret = max(count[EXYNOS_DRM_OPS_SRC], +			count[EXYNOS_DRM_OPS_DST]); + +	return ret; +} + +static struct drm_exynos_ipp_mem_node +		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct drm_exynos_ipp_mem_node *m_node; +	struct list_head *head; +	int count = 0; + +	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id); + +	/* source/destination memory list */ +	head = &c_node->mem_list[qbuf->ops_id]; + +	/* find memory node from memory list */ +	list_for_each_entry(m_node, head, list) { +		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); + +		/* compare buffer id */ +		if (m_node->buf_id == qbuf->buf_id) +			return m_node; +	} + +	return NULL; +} + +static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_mem_node *m_node) +{ +	struct exynos_drm_ipp_ops *ops = NULL; +	int ret = 0; + +	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); + +	if (!m_node) { +		DRM_ERROR("invalid queue node.\n"); +		return -EFAULT; +	} + +	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); + +	/* get operations callback */ +	ops = ippdrv->ops[m_node->ops_id]; +	if (!ops) { +		DRM_ERROR("not support ops.\n"); +		return -EFAULT; +	} + +	/* set address and enable irq */ +	if (ops->set_addr) { +		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info, +			m_node->buf_id, IPP_BUF_ENQUEUE); +		if (ret) { +			DRM_ERROR("failed to set addr.\n"); +			return ret; +		} +	} + +	return ret; +} + +static struct drm_exynos_ipp_mem_node +		*ipp_get_mem_node(struct drm_device *drm_dev, +		struct drm_file *file, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct drm_exynos_ipp_mem_node *m_node; +	struct drm_exynos_ipp_buf_info buf_info; +	void *addr; +	int 
i; + +	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); +	if (!m_node) +		return ERR_PTR(-ENOMEM); + +	/* clear base address for error handling */ +	memset(&buf_info, 0x0, sizeof(buf_info)); + +	/* operations, buffer id */ +	m_node->ops_id = qbuf->ops_id; +	m_node->prop_id = qbuf->prop_id; +	m_node->buf_id = qbuf->buf_id; + +	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); +	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); + +	for_each_ipp_planar(i) { +		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]); + +		/* get dma address by handle */ +		if (qbuf->handle[i]) { +			addr = exynos_drm_gem_get_dma_addr(drm_dev, +					qbuf->handle[i], file); +			if (IS_ERR(addr)) { +				DRM_ERROR("failed to get addr.\n"); +				goto err_clear; +			} + +			buf_info.handles[i] = qbuf->handle[i]; +			buf_info.base[i] = *(dma_addr_t *) addr; +			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n", +				i, buf_info.base[i], (int)buf_info.handles[i]); +		} +	} + +	m_node->filp = file; +	m_node->buf_info = buf_info; +	mutex_lock(&c_node->mem_lock); +	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); +	mutex_unlock(&c_node->mem_lock); + +	return m_node; + +err_clear: +	kfree(m_node); +	return ERR_PTR(-EFAULT); +} + +static int ipp_put_mem_node(struct drm_device *drm_dev, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_mem_node *m_node) +{ +	int i; + +	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); + +	if (!m_node) { +		DRM_ERROR("invalid dequeue node.\n"); +		return -EFAULT; +	} + +	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id); + +	/* put gem buffer */ +	for_each_ipp_planar(i) { +		unsigned long handle = m_node->buf_info.handles[i]; +		if (handle) +			exynos_drm_gem_put_dma_addr(drm_dev, handle, +							m_node->filp); +	} + +	/* delete list in queue */ +	list_del(&m_node->list); +	kfree(m_node); + +	return 0; +} + +static void ipp_free_event(struct drm_pending_event *event) +{ +	kfree(event); +} + +static int 
ipp_get_event(struct drm_device *drm_dev, +		struct drm_file *file, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct drm_exynos_ipp_send_event *e; +	unsigned long flags; + +	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id); + +	e = kzalloc(sizeof(*e), GFP_KERNEL); +	if (!e) { +		spin_lock_irqsave(&drm_dev->event_lock, flags); +		file->event_space += sizeof(e->event); +		spin_unlock_irqrestore(&drm_dev->event_lock, flags); +		return -ENOMEM; +	} + +	/* make event */ +	e->event.base.type = DRM_EXYNOS_IPP_EVENT; +	e->event.base.length = sizeof(e->event); +	e->event.user_data = qbuf->user_data; +	e->event.prop_id = qbuf->prop_id; +	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id; +	e->base.event = &e->event.base; +	e->base.file_priv = file; +	e->base.destroy = ipp_free_event; +	mutex_lock(&c_node->event_lock); +	list_add_tail(&e->base.link, &c_node->event_list); +	mutex_unlock(&c_node->event_lock); + +	return 0; +} + +static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct drm_exynos_ipp_send_event *e, *te; +	int count = 0; + +	mutex_lock(&c_node->event_lock); +	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { +		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); + +		/* +		 * qbuf == NULL condition means all event deletion. +		 * stop operations want to delete all event list. +		 * another case delete only same buf id. 
+		 */ +		if (!qbuf) { +			/* delete list */ +			list_del(&e->base.link); +			kfree(e); +		} + +		/* compare buffer id */ +		if (qbuf && (qbuf->buf_id == +		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) { +			/* delete list */ +			list_del(&e->base.link); +			kfree(e); +			goto out_unlock; +		} +	} + +out_unlock: +	mutex_unlock(&c_node->event_lock); +	return; +} + +static void ipp_handle_cmd_work(struct device *dev, +		struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_cmd_work *cmd_work, +		struct drm_exynos_ipp_cmd_node *c_node) +{ +	struct ipp_context *ctx = get_ipp_context(dev); + +	cmd_work->ippdrv = ippdrv; +	cmd_work->c_node = c_node; +	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work); +} + +static int ipp_queue_buf_with_run(struct device *dev, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_mem_node *m_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_property *property; +	struct exynos_drm_ipp_ops *ops; +	int ret; + +	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id); +	if (IS_ERR(ippdrv)) { +		DRM_ERROR("failed to get ipp driver.\n"); +		return -EFAULT; +	} + +	ops = ippdrv->ops[qbuf->ops_id]; +	if (!ops) { +		DRM_ERROR("failed to get ops.\n"); +		return -EFAULT; +	} + +	property = &c_node->property; + +	if (c_node->state != IPP_STATE_START) { +		DRM_DEBUG_KMS("bypass for invalid state.\n"); +		return 0; +	} + +	mutex_lock(&c_node->mem_lock); +	if (!ipp_check_mem_list(c_node)) { +		mutex_unlock(&c_node->mem_lock); +		DRM_DEBUG_KMS("empty memory.\n"); +		return 0; +	} + +	/* +	 * If set destination buffer and enabled clock, +	 * then m2m operations need start operations at queue_buf +	 */ +	if (ipp_is_m2m_cmd(property->cmd)) { +		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work; + +		cmd_work->ctrl = IPP_CTRL_PLAY; +		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); +	} else { +		ret = ipp_set_mem_node(ippdrv, c_node, m_node); +		if (ret) { +			
mutex_unlock(&c_node->mem_lock); +			DRM_ERROR("failed to set m node.\n"); +			return ret; +		} +	} +	mutex_unlock(&c_node->mem_lock); + +	return 0; +} + +static void ipp_clean_queue_buf(struct drm_device *drm_dev, +		struct drm_exynos_ipp_cmd_node *c_node, +		struct drm_exynos_ipp_queue_buf *qbuf) +{ +	struct drm_exynos_ipp_mem_node *m_node, *tm_node; + +	/* delete list */ +	mutex_lock(&c_node->mem_lock); +	list_for_each_entry_safe(m_node, tm_node, +		&c_node->mem_list[qbuf->ops_id], list) { +		if (m_node->buf_id == qbuf->buf_id && +		    m_node->ops_id == qbuf->ops_id) +			ipp_put_mem_node(drm_dev, c_node, m_node); +	} +	mutex_unlock(&c_node->mem_lock); +} + +int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, +		struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; +	struct device *dev = priv->dev; +	struct ipp_context *ctx = get_ipp_context(dev); +	struct drm_exynos_ipp_queue_buf *qbuf = data; +	struct drm_exynos_ipp_cmd_node *c_node; +	struct drm_exynos_ipp_mem_node *m_node; +	int ret; + +	if (!qbuf) { +		DRM_ERROR("invalid buf parameter.\n"); +		return -EINVAL; +	} + +	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) { +		DRM_ERROR("invalid ops parameter.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n", +		qbuf->prop_id, qbuf->ops_id ? "dst" : "src", +		qbuf->buf_id, qbuf->buf_type); + +	/* find command node */ +	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, +		qbuf->prop_id); +	if (IS_ERR(c_node)) { +		DRM_ERROR("failed to get command node.\n"); +		return PTR_ERR(c_node); +	} + +	/* buffer control */ +	switch (qbuf->buf_type) { +	case IPP_BUF_ENQUEUE: +		/* get memory node */ +		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf); +		if (IS_ERR(m_node)) { +			DRM_ERROR("failed to get m_node.\n"); +			return PTR_ERR(m_node); +		} + +		/* +		 * first step get event for destination buffer. 
+		 * and second step when M2M case run with destination buffer +		 * if needed. +		 */ +		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) { +			/* get event for destination buffer */ +			ret = ipp_get_event(drm_dev, file, c_node, qbuf); +			if (ret) { +				DRM_ERROR("failed to get event.\n"); +				goto err_clean_node; +			} + +			/* +			 * M2M case run play control for streaming feature. +			 * other case set address and waiting. +			 */ +			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf); +			if (ret) { +				DRM_ERROR("failed to run command.\n"); +				goto err_clean_node; +			} +		} +		break; +	case IPP_BUF_DEQUEUE: +		mutex_lock(&c_node->lock); + +		/* put event for destination buffer */ +		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) +			ipp_put_event(c_node, qbuf); + +		ipp_clean_queue_buf(drm_dev, c_node, qbuf); + +		mutex_unlock(&c_node->lock); +		break; +	default: +		DRM_ERROR("invalid buffer control.\n"); +		return -EINVAL; +	} + +	return 0; + +err_clean_node: +	DRM_ERROR("clean memory nodes.\n"); + +	ipp_clean_queue_buf(drm_dev, c_node, qbuf); +	return ret; +} + +static bool exynos_drm_ipp_check_valid(struct device *dev, +		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state) +{ +	if (ctrl != IPP_CTRL_PLAY) { +		if (pm_runtime_suspended(dev)) { +			DRM_ERROR("pm:runtime_suspended.\n"); +			goto err_status; +		} +	} + +	switch (ctrl) { +	case IPP_CTRL_PLAY: +		if (state != IPP_STATE_IDLE) +			goto err_status; +		break; +	case IPP_CTRL_STOP: +		if (state == IPP_STATE_STOP) +			goto err_status; +		break; +	case IPP_CTRL_PAUSE: +		if (state != IPP_STATE_START) +			goto err_status; +		break; +	case IPP_CTRL_RESUME: +		if (state != IPP_STATE_STOP) +			goto err_status; +		break; +	default: +		DRM_ERROR("invalid state.\n"); +		goto err_status; +	} + +	return true; + +err_status: +	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state); +	return false; +} + +int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, +		struct drm_file *file) 
+{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; +	struct exynos_drm_ippdrv *ippdrv = NULL; +	struct device *dev = priv->dev; +	struct ipp_context *ctx = get_ipp_context(dev); +	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; +	struct drm_exynos_ipp_cmd_work *cmd_work; +	struct drm_exynos_ipp_cmd_node *c_node; + +	if (!ctx) { +		DRM_ERROR("invalid context.\n"); +		return -EINVAL; +	} + +	if (!cmd_ctrl) { +		DRM_ERROR("invalid control parameter.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n", +		cmd_ctrl->ctrl, cmd_ctrl->prop_id); + +	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id); +	if (IS_ERR(ippdrv)) { +		DRM_ERROR("failed to get ipp driver.\n"); +		return PTR_ERR(ippdrv); +	} + +	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, +		cmd_ctrl->prop_id); +	if (IS_ERR(c_node)) { +		DRM_ERROR("invalid command node list.\n"); +		return PTR_ERR(c_node); +	} + +	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, +	    c_node->state)) { +		DRM_ERROR("invalid state.\n"); +		return -EINVAL; +	} + +	switch (cmd_ctrl->ctrl) { +	case IPP_CTRL_PLAY: +		if (pm_runtime_suspended(ippdrv->dev)) +			pm_runtime_get_sync(ippdrv->dev); + +		c_node->state = IPP_STATE_START; + +		cmd_work = c_node->start_work; +		cmd_work->ctrl = cmd_ctrl->ctrl; +		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); +		break; +	case IPP_CTRL_STOP: +		cmd_work = c_node->stop_work; +		cmd_work->ctrl = cmd_ctrl->ctrl; +		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); + +		if (!wait_for_completion_timeout(&c_node->stop_complete, +		    msecs_to_jiffies(300))) { +			DRM_ERROR("timeout stop:prop_id[%d]\n", +				c_node->property.prop_id); +		} + +		c_node->state = IPP_STATE_STOP; +		ippdrv->dedicated = false; +		mutex_lock(&ippdrv->cmd_lock); +		ipp_clean_cmd_node(ctx, c_node); + +		if (list_empty(&ippdrv->cmd_list)) +			pm_runtime_put_sync(ippdrv->dev); +		
mutex_unlock(&ippdrv->cmd_lock); +		break; +	case IPP_CTRL_PAUSE: +		cmd_work = c_node->stop_work; +		cmd_work->ctrl = cmd_ctrl->ctrl; +		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); + +		if (!wait_for_completion_timeout(&c_node->stop_complete, +		    msecs_to_jiffies(200))) { +			DRM_ERROR("timeout stop:prop_id[%d]\n", +				c_node->property.prop_id); +		} + +		c_node->state = IPP_STATE_STOP; +		break; +	case IPP_CTRL_RESUME: +		c_node->state = IPP_STATE_START; +		cmd_work = c_node->start_work; +		cmd_work->ctrl = cmd_ctrl->ctrl; +		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node); +		break; +	default: +		DRM_ERROR("could not support this state currently.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n", +		cmd_ctrl->ctrl, cmd_ctrl->prop_id); + +	return 0; +} + +int exynos_drm_ippnb_register(struct notifier_block *nb) +{ +	return blocking_notifier_chain_register( +		&exynos_drm_ippnb_list, nb); +} + +int exynos_drm_ippnb_unregister(struct notifier_block *nb) +{ +	return blocking_notifier_chain_unregister( +		&exynos_drm_ippnb_list, nb); +} + +int exynos_drm_ippnb_send_event(unsigned long val, void *v) +{ +	return blocking_notifier_call_chain( +		&exynos_drm_ippnb_list, val, v); +} + +static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_property *property) +{ +	struct exynos_drm_ipp_ops *ops = NULL; +	bool swap = false; +	int ret, i; + +	if (!property) { +		DRM_ERROR("invalid property parameter.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + +	/* reset h/w block */ +	if (ippdrv->reset && +	    ippdrv->reset(ippdrv->dev)) { +		DRM_ERROR("failed to reset.\n"); +		return -EINVAL; +	} + +	/* set source,destination operations */ +	for_each_ipp_ops(i) { +		struct drm_exynos_ipp_config *config = +			&property->config[i]; + +		ops = ippdrv->ops[i]; +		if (!ops || !config) { +			DRM_ERROR("not support ops and config.\n"); +			return -EINVAL; +		} + +		/* set format */ +	
	if (ops->set_fmt) { +			ret = ops->set_fmt(ippdrv->dev, config->fmt); +			if (ret) { +				DRM_ERROR("not support format.\n"); +				return ret; +			} +		} + +		/* set transform for rotation, flip */ +		if (ops->set_transf) { +			ret = ops->set_transf(ippdrv->dev, config->degree, +				config->flip, &swap); +			if (ret) { +				DRM_ERROR("not support tranf.\n"); +				return -EINVAL; +			} +		} + +		/* set size */ +		if (ops->set_size) { +			ret = ops->set_size(ippdrv->dev, swap, &config->pos, +				&config->sz); +			if (ret) { +				DRM_ERROR("not support size.\n"); +				return ret; +			} +		} +	} + +	return 0; +} + +static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_cmd_node *c_node) +{ +	struct drm_exynos_ipp_mem_node *m_node; +	struct drm_exynos_ipp_property *property = &c_node->property; +	struct list_head *head; +	int ret, i; + +	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + +	/* store command info in ippdrv */ +	ippdrv->c_node = c_node; + +	mutex_lock(&c_node->mem_lock); +	if (!ipp_check_mem_list(c_node)) { +		DRM_DEBUG_KMS("empty memory.\n"); +		ret = -ENOMEM; +		goto err_unlock; +	} + +	/* set current property in ippdrv */ +	ret = ipp_set_property(ippdrv, property); +	if (ret) { +		DRM_ERROR("failed to set property.\n"); +		ippdrv->c_node = NULL; +		goto err_unlock; +	} + +	/* check command */ +	switch (property->cmd) { +	case IPP_CMD_M2M: +		for_each_ipp_ops(i) { +			/* source/destination memory list */ +			head = &c_node->mem_list[i]; + +			m_node = list_first_entry(head, +				struct drm_exynos_ipp_mem_node, list); +			if (!m_node) { +				DRM_ERROR("failed to get node.\n"); +				ret = -EFAULT; +				goto err_unlock; +			} + +			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); + +			ret = ipp_set_mem_node(ippdrv, c_node, m_node); +			if (ret) { +				DRM_ERROR("failed to set m node.\n"); +				goto err_unlock; +			} +		} +		break; +	case IPP_CMD_WB: +		/* destination memory list */ +		head = 
&c_node->mem_list[EXYNOS_DRM_OPS_DST]; + +		list_for_each_entry(m_node, head, list) { +			ret = ipp_set_mem_node(ippdrv, c_node, m_node); +			if (ret) { +				DRM_ERROR("failed to set m node.\n"); +				goto err_unlock; +			} +		} +		break; +	case IPP_CMD_OUTPUT: +		/* source memory list */ +		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; + +		list_for_each_entry(m_node, head, list) { +			ret = ipp_set_mem_node(ippdrv, c_node, m_node); +			if (ret) { +				DRM_ERROR("failed to set m node.\n"); +				goto err_unlock; +			} +		} +		break; +	default: +		DRM_ERROR("invalid operations.\n"); +		ret = -EINVAL; +		goto err_unlock; +	} +	mutex_unlock(&c_node->mem_lock); + +	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd); + +	/* start operations */ +	if (ippdrv->start) { +		ret = ippdrv->start(ippdrv->dev, property->cmd); +		if (ret) { +			DRM_ERROR("failed to start ops.\n"); +			ippdrv->c_node = NULL; +			return ret; +		} +	} + +	return 0; + +err_unlock: +	mutex_unlock(&c_node->mem_lock); +	ippdrv->c_node = NULL; +	return ret; +} + +static int ipp_stop_property(struct drm_device *drm_dev, +		struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_cmd_node *c_node) +{ +	struct drm_exynos_ipp_mem_node *m_node, *tm_node; +	struct drm_exynos_ipp_property *property = &c_node->property; +	struct list_head *head; +	int ret = 0, i; + +	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id); + +	/* put event */ +	ipp_put_event(c_node, NULL); + +	mutex_lock(&c_node->mem_lock); + +	/* check command */ +	switch (property->cmd) { +	case IPP_CMD_M2M: +		for_each_ipp_ops(i) { +			/* source/destination memory list */ +			head = &c_node->mem_list[i]; + +			list_for_each_entry_safe(m_node, tm_node, +				head, list) { +				ret = ipp_put_mem_node(drm_dev, c_node, +					m_node); +				if (ret) { +					DRM_ERROR("failed to put m_node.\n"); +					goto err_clear; +				} +			} +		} +		break; +	case IPP_CMD_WB: +		/* destination memory list */ +		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST]; + +		
list_for_each_entry_safe(m_node, tm_node, head, list) { +			ret = ipp_put_mem_node(drm_dev, c_node, m_node); +			if (ret) { +				DRM_ERROR("failed to put m_node.\n"); +				goto err_clear; +			} +		} +		break; +	case IPP_CMD_OUTPUT: +		/* source memory list */ +		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; + +		list_for_each_entry_safe(m_node, tm_node, head, list) { +			ret = ipp_put_mem_node(drm_dev, c_node, m_node); +			if (ret) { +				DRM_ERROR("failed to put m_node.\n"); +				goto err_clear; +			} +		} +		break; +	default: +		DRM_ERROR("invalid operations.\n"); +		ret = -EINVAL; +		goto err_clear; +	} + +err_clear: +	mutex_unlock(&c_node->mem_lock); + +	/* stop operations */ +	if (ippdrv->stop) +		ippdrv->stop(ippdrv->dev, property->cmd); + +	return ret; +} + +void ipp_sched_cmd(struct work_struct *work) +{ +	struct drm_exynos_ipp_cmd_work *cmd_work = +		(struct drm_exynos_ipp_cmd_work *)work; +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	struct drm_exynos_ipp_property *property; +	int ret; + +	ippdrv = cmd_work->ippdrv; +	if (!ippdrv) { +		DRM_ERROR("invalid ippdrv list.\n"); +		return; +	} + +	c_node = cmd_work->c_node; +	if (!c_node) { +		DRM_ERROR("invalid command node list.\n"); +		return; +	} + +	mutex_lock(&c_node->lock); + +	property = &c_node->property; + +	switch (cmd_work->ctrl) { +	case IPP_CTRL_PLAY: +	case IPP_CTRL_RESUME: +		ret = ipp_start_property(ippdrv, c_node); +		if (ret) { +			DRM_ERROR("failed to start property:prop_id[%d]\n", +				c_node->property.prop_id); +			goto err_unlock; +		} + +		/* +		 * M2M case supports wait_completion of transfer. +		 * because M2M case supports single unit operation +		 * with multiple queue. +		 * M2M need to wait completion of data transfer. 
+		 */ +		if (ipp_is_m2m_cmd(property->cmd)) { +			if (!wait_for_completion_timeout +			    (&c_node->start_complete, msecs_to_jiffies(200))) { +				DRM_ERROR("timeout event:prop_id[%d]\n", +					c_node->property.prop_id); +				goto err_unlock; +			} +		} +		break; +	case IPP_CTRL_STOP: +	case IPP_CTRL_PAUSE: +		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv, +			c_node); +		if (ret) { +			DRM_ERROR("failed to stop property.\n"); +			goto err_unlock; +		} + +		complete(&c_node->stop_complete); +		break; +	default: +		DRM_ERROR("unknown control type\n"); +		break; +	} + +	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl); + +err_unlock: +	mutex_unlock(&c_node->lock); +} + +static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, +		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id) +{ +	struct drm_device *drm_dev = ippdrv->drm_dev; +	struct drm_exynos_ipp_property *property = &c_node->property; +	struct drm_exynos_ipp_mem_node *m_node; +	struct drm_exynos_ipp_queue_buf qbuf; +	struct drm_exynos_ipp_send_event *e; +	struct list_head *head; +	struct timeval now; +	unsigned long flags; +	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, }; +	int ret, i; + +	for_each_ipp_ops(i) +		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? 
"dst" : "src", buf_id[i]); + +	if (!drm_dev) { +		DRM_ERROR("failed to get drm_dev.\n"); +		return -EINVAL; +	} + +	if (!property) { +		DRM_ERROR("failed to get property.\n"); +		return -EINVAL; +	} + +	mutex_lock(&c_node->event_lock); +	if (list_empty(&c_node->event_list)) { +		DRM_DEBUG_KMS("event list is empty.\n"); +		ret = 0; +		goto err_event_unlock; +	} + +	mutex_lock(&c_node->mem_lock); +	if (!ipp_check_mem_list(c_node)) { +		DRM_DEBUG_KMS("empty memory.\n"); +		ret = 0; +		goto err_mem_unlock; +	} + +	/* check command */ +	switch (property->cmd) { +	case IPP_CMD_M2M: +		for_each_ipp_ops(i) { +			/* source/destination memory list */ +			head = &c_node->mem_list[i]; + +			m_node = list_first_entry(head, +				struct drm_exynos_ipp_mem_node, list); +			if (!m_node) { +				DRM_ERROR("empty memory node.\n"); +				ret = -ENOMEM; +				goto err_mem_unlock; +			} + +			tbuf_id[i] = m_node->buf_id; +			DRM_DEBUG_KMS("%s buf_id[%d]\n", +				i ? "dst" : "src", tbuf_id[i]); + +			ret = ipp_put_mem_node(drm_dev, c_node, m_node); +			if (ret) +				DRM_ERROR("failed to put m_node.\n"); +		} +		break; +	case IPP_CMD_WB: +		/* clear buf for finding */ +		memset(&qbuf, 0x0, sizeof(qbuf)); +		qbuf.ops_id = EXYNOS_DRM_OPS_DST; +		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST]; + +		/* get memory node entry */ +		m_node = ipp_find_mem_node(c_node, &qbuf); +		if (!m_node) { +			DRM_ERROR("empty memory node.\n"); +			ret = -ENOMEM; +			goto err_mem_unlock; +		} + +		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id; + +		ret = ipp_put_mem_node(drm_dev, c_node, m_node); +		if (ret) +			DRM_ERROR("failed to put m_node.\n"); +		break; +	case IPP_CMD_OUTPUT: +		/* source memory list */ +		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC]; + +		m_node = list_first_entry(head, +			struct drm_exynos_ipp_mem_node, list); +		if (!m_node) { +			DRM_ERROR("empty memory node.\n"); +			ret = -ENOMEM; +			goto err_mem_unlock; +		} + +		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; + +		ret = 
ipp_put_mem_node(drm_dev, c_node, m_node); +		if (ret) +			DRM_ERROR("failed to put m_node.\n"); +		break; +	default: +		DRM_ERROR("invalid operations.\n"); +		ret = -EINVAL; +		goto err_mem_unlock; +	} +	mutex_unlock(&c_node->mem_lock); + +	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST]) +		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n", +			tbuf_id[1], buf_id[1], property->prop_id); + +	/* +	 * command node have event list of destination buffer +	 * If destination buffer enqueue to mem list, +	 * then we make event and link to event list tail. +	 * so, we get first event for first enqueued buffer. +	 */ +	e = list_first_entry(&c_node->event_list, +		struct drm_exynos_ipp_send_event, base.link); + +	do_gettimeofday(&now); +	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec); +	e->event.tv_sec = now.tv_sec; +	e->event.tv_usec = now.tv_usec; +	e->event.prop_id = property->prop_id; + +	/* set buffer id about source destination */ +	for_each_ipp_ops(i) +		e->event.buf_id[i] = tbuf_id[i]; + +	spin_lock_irqsave(&drm_dev->event_lock, flags); +	list_move_tail(&e->base.link, &e->base.file_priv->event_list); +	wake_up_interruptible(&e->base.file_priv->event_wait); +	spin_unlock_irqrestore(&drm_dev->event_lock, flags); +	mutex_unlock(&c_node->event_lock); + +	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n", +		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]); + +	return 0; + +err_mem_unlock: +	mutex_unlock(&c_node->mem_lock); +err_event_unlock: +	mutex_unlock(&c_node->event_lock); +	return ret; +} + +void ipp_sched_event(struct work_struct *work) +{ +	struct drm_exynos_ipp_event_work *event_work = +		(struct drm_exynos_ipp_event_work *)work; +	struct exynos_drm_ippdrv *ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	int ret; + +	if (!event_work) { +		DRM_ERROR("failed to get event_work.\n"); +		return; +	} + +	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]); + +	ippdrv = event_work->ippdrv; +	
if (!ippdrv) { +		DRM_ERROR("failed to get ipp driver.\n"); +		return; +	} + +	c_node = ippdrv->c_node; +	if (!c_node) { +		DRM_ERROR("failed to get command node.\n"); +		return; +	} + +	/* +	 * IPP supports command thread, event thread synchronization. +	 * If IPP close immediately from user land, then IPP make +	 * synchronization with command thread, so make complete event. +	 * or going out operations. +	 */ +	if (c_node->state != IPP_STATE_START) { +		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n", +			c_node->state, c_node->property.prop_id); +		goto err_completion; +	} + +	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id); +	if (ret) { +		DRM_ERROR("failed to send event.\n"); +		goto err_completion; +	} + +err_completion: +	if (ipp_is_m2m_cmd(c_node->property.cmd)) +		complete(&c_node->start_complete); +} + +static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) +{ +	struct ipp_context *ctx = get_ipp_context(dev); +	struct exynos_drm_ippdrv *ippdrv; +	int ret, count = 0; + +	/* get ipp driver entry */ +	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { +		u32 ipp_id; + +		ippdrv->drm_dev = drm_dev; + +		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv, +				    &ipp_id); +		if (ret || ipp_id == 0) { +			DRM_ERROR("failed to create id.\n"); +			goto err; +		} + +		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", +			count++, (int)ippdrv, ipp_id); + +		ippdrv->prop_list.ipp_id = ipp_id; + +		/* store parent device for node */ +		ippdrv->parent_dev = dev; + +		/* store event work queue and handler */ +		ippdrv->event_workq = ctx->event_workq; +		ippdrv->sched_event = ipp_sched_event; +		INIT_LIST_HEAD(&ippdrv->cmd_list); +		mutex_init(&ippdrv->cmd_lock); + +		if (is_drm_iommu_supported(drm_dev)) { +			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev); +			if (ret) { +				DRM_ERROR("failed to activate iommu\n"); +				goto err; +			} +		} +	} + +	return 0; + +err: +	/* get ipp driver entry */ +	
list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list, +						drv_list) { +		if (is_drm_iommu_supported(drm_dev)) +			drm_iommu_detach_device(drm_dev, ippdrv->dev); + +		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, +				ippdrv->prop_list.ipp_id); +	} + +	return ret; +} + +static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev) +{ +	struct exynos_drm_ippdrv *ippdrv; +	struct ipp_context *ctx = get_ipp_context(dev); + +	/* get ipp driver entry */ +	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { +		if (is_drm_iommu_supported(drm_dev)) +			drm_iommu_detach_device(drm_dev, ippdrv->dev); + +		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock, +				ippdrv->prop_list.ipp_id); + +		ippdrv->drm_dev = NULL; +		exynos_drm_ippdrv_unregister(ippdrv); +	} +} + +static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, +		struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv; + +	priv = kzalloc(sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; +	priv->dev = dev; +	file_priv->ipp_priv = priv; + +	INIT_LIST_HEAD(&priv->event_list); + +	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv); + +	return 0; +} + +static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, +		struct drm_file *file) +{ +	struct drm_exynos_file_private *file_priv = file->driver_priv; +	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; +	struct exynos_drm_ippdrv *ippdrv = NULL; +	struct ipp_context *ctx = get_ipp_context(dev); +	struct drm_exynos_ipp_cmd_node *c_node, *tc_node; +	int count = 0; + +	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv); + +	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { +		mutex_lock(&ippdrv->cmd_lock); +		list_for_each_entry_safe(c_node, tc_node, +			&ippdrv->cmd_list, list) { +			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", +				count++, (int)ippdrv); + +			if (c_node->priv == priv) { +				/* +				 * 
userland goto unnormal state. process killed. +				 * and close the file. +				 * so, IPP didn't called stop cmd ctrl. +				 * so, we are make stop operation in this state. +				 */ +				if (c_node->state == IPP_STATE_START) { +					ipp_stop_property(drm_dev, ippdrv, +						c_node); +					c_node->state = IPP_STATE_STOP; +				} + +				ippdrv->dedicated = false; +				ipp_clean_cmd_node(ctx, c_node); +				if (list_empty(&ippdrv->cmd_list)) +					pm_runtime_put_sync(ippdrv->dev); +			} +		} +		mutex_unlock(&ippdrv->cmd_lock); +	} + +	kfree(priv); +	return; +} + +static int ipp_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct ipp_context *ctx; +	struct exynos_drm_subdrv *subdrv; +	int ret; + +	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) +		return -ENOMEM; + +	mutex_init(&ctx->ipp_lock); +	mutex_init(&ctx->prop_lock); + +	idr_init(&ctx->ipp_idr); +	idr_init(&ctx->prop_idr); + +	/* +	 * create single thread for ipp event +	 * IPP supports event thread for IPP drivers. +	 * IPP driver send event_work to this thread. +	 * and IPP event thread send event to user process. +	 */ +	ctx->event_workq = create_singlethread_workqueue("ipp_event"); +	if (!ctx->event_workq) { +		dev_err(dev, "failed to create event workqueue\n"); +		return -EINVAL; +	} + +	/* +	 * create single thread for ipp command +	 * IPP supports command thread for user process. +	 * user process make command node using set property ioctl. +	 * and make start_work and send this work to command thread. +	 * and then this command thread start property. 
+	 */ +	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd"); +	if (!ctx->cmd_workq) { +		dev_err(dev, "failed to create cmd workqueue\n"); +		ret = -EINVAL; +		goto err_event_workq; +	} + +	/* set sub driver informations */ +	subdrv = &ctx->subdrv; +	subdrv->dev = dev; +	subdrv->probe = ipp_subdrv_probe; +	subdrv->remove = ipp_subdrv_remove; +	subdrv->open = ipp_subdrv_open; +	subdrv->close = ipp_subdrv_close; + +	platform_set_drvdata(pdev, ctx); + +	ret = exynos_drm_subdrv_register(subdrv); +	if (ret < 0) { +		DRM_ERROR("failed to register drm ipp device.\n"); +		goto err_cmd_workq; +	} + +	dev_info(dev, "drm ipp registered successfully.\n"); + +	return 0; + +err_cmd_workq: +	destroy_workqueue(ctx->cmd_workq); +err_event_workq: +	destroy_workqueue(ctx->event_workq); +	return ret; +} + +static int ipp_remove(struct platform_device *pdev) +{ +	struct ipp_context *ctx = platform_get_drvdata(pdev); + +	/* unregister sub driver */ +	exynos_drm_subdrv_unregister(&ctx->subdrv); + +	/* remove,destroy ipp idr */ +	idr_destroy(&ctx->ipp_idr); +	idr_destroy(&ctx->prop_idr); + +	mutex_destroy(&ctx->ipp_lock); +	mutex_destroy(&ctx->prop_lock); + +	/* destroy command, event work queue */ +	destroy_workqueue(ctx->cmd_workq); +	destroy_workqueue(ctx->event_workq); + +	return 0; +} + +static int ipp_power_ctrl(struct ipp_context *ctx, bool enable) +{ +	DRM_DEBUG_KMS("enable[%d]\n", enable); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int ipp_suspend(struct device *dev) +{ +	struct ipp_context *ctx = get_ipp_context(dev); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	return ipp_power_ctrl(ctx, false); +} + +static int ipp_resume(struct device *dev) +{ +	struct ipp_context *ctx = get_ipp_context(dev); + +	if (!pm_runtime_suspended(dev)) +		return ipp_power_ctrl(ctx, true); + +	return 0; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int ipp_runtime_suspend(struct device *dev) +{ +	struct ipp_context *ctx = get_ipp_context(dev); + +	return ipp_power_ctrl(ctx, 
false); +} + +static int ipp_runtime_resume(struct device *dev) +{ +	struct ipp_context *ctx = get_ipp_context(dev); + +	return ipp_power_ctrl(ctx, true); +} +#endif + +static const struct dev_pm_ops ipp_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume) +	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL) +}; + +struct platform_driver ipp_driver = { +	.probe		= ipp_probe, +	.remove		= ipp_remove, +	.driver		= { +		.name	= "exynos-drm-ipp", +		.owner	= THIS_MODULE, +		.pm	= &ipp_pm_ops, +	}, +}; + diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h new file mode 100644 index 00000000000..7aaeaae757c --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * + * Authors: + *	Eunchul Kim <chulspro.kim@samsung.com> + *	Jinyoung Jeon <jy0.jeon@samsung.com> + *	Sangmin Lee <lsmin.lee@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef _EXYNOS_DRM_IPP_H_ +#define _EXYNOS_DRM_IPP_H_ + +#define for_each_ipp_ops(pos)	\ +	for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++) +#define for_each_ipp_planar(pos)	\ +	for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++) + +#define IPP_GET_LCD_WIDTH	_IOR('F', 302, int) +#define IPP_GET_LCD_HEIGHT	_IOR('F', 303, int) +#define IPP_SET_WRITEBACK	_IOW('F', 304, u32) + +/* definition of state */ +enum drm_exynos_ipp_state { +	IPP_STATE_IDLE, +	IPP_STATE_START, +	IPP_STATE_STOP, +}; + +/* + * A structure of command work information. + * @work: work structure. + * @ippdrv: current work ippdrv. + * @c_node: command node information. + * @ctrl: command control. 
+ */ +struct drm_exynos_ipp_cmd_work { +	struct work_struct	work; +	struct exynos_drm_ippdrv	*ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node; +	enum drm_exynos_ipp_ctrl	ctrl; +}; + +/* + * A structure of command node. + * + * @priv: IPP private information. + * @list: list head to command queue information. + * @event_list: list head of event. + * @mem_list: list head to source,destination memory queue information. + * @lock: lock for synchronization of access to ioctl. + * @mem_lock: lock for synchronization of access to memory nodes. + * @event_lock: lock for synchronization of access to scheduled event. + * @start_complete: completion of start of command. + * @stop_complete: completion of stop of command. + * @property: property information. + * @start_work: start command work structure. + * @stop_work: stop command work structure. + * @event_work: event work structure. + * @state: state of command node. + */ +struct drm_exynos_ipp_cmd_node { +	struct exynos_drm_ipp_private *priv; +	struct list_head	list; +	struct list_head	event_list; +	struct list_head	mem_list[EXYNOS_DRM_OPS_MAX]; +	struct mutex	lock; +	struct mutex	mem_lock; +	struct mutex	event_lock; +	struct completion	start_complete; +	struct completion	stop_complete; +	struct drm_exynos_ipp_property	property; +	struct drm_exynos_ipp_cmd_work *start_work; +	struct drm_exynos_ipp_cmd_work *stop_work; +	struct drm_exynos_ipp_event_work *event_work; +	enum drm_exynos_ipp_state	state; +}; + +/* + * A structure of buffer information. + * + * @handles: Y, Cb, Cr each gem object handle. + * @base: Y, Cb, Cr each planar address. + */ +struct drm_exynos_ipp_buf_info { +	unsigned long	handles[EXYNOS_DRM_PLANAR_MAX]; +	dma_addr_t	base[EXYNOS_DRM_PLANAR_MAX]; +}; + +/* + * A structure of wb setting information. + * + * @enable: enable flag for wb. + * @refresh: HZ of the refresh rate. + */ +struct drm_exynos_ipp_set_wb { +	__u32	enable; +	__u32	refresh; +}; + +/* + * A structure of event work information. 
+ * + * @work: work structure. + * @ippdrv: current work ippdrv. + * @buf_id: id of src, dst buffer. + */ +struct drm_exynos_ipp_event_work { +	struct work_struct	work; +	struct exynos_drm_ippdrv *ippdrv; +	u32	buf_id[EXYNOS_DRM_OPS_MAX]; +}; + +/* + * A structure of source,destination operations. + * + * @set_fmt: set format of image. + * @set_transf: set transform(rotations, flip). + * @set_size: set size of region. + * @set_addr: set address for dma. + */ +struct exynos_drm_ipp_ops { +	int (*set_fmt)(struct device *dev, u32 fmt); +	int (*set_transf)(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap); +	int (*set_size)(struct device *dev, int swap, +		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz); +	int (*set_addr)(struct device *dev, +		 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, +		enum drm_exynos_ipp_buf_type buf_type); +}; + +/* + * A structure of ipp driver. + * + * @drv_list: list head for registed sub driver information. + * @parent_dev: parent device information. + * @dev: platform device. + * @drm_dev: drm device. + * @dedicated: dedicated ipp device. + * @ops: source, destination operations. + * @event_workq: event work queue. + * @c_node: current command information. + * @cmd_list: list head for command information. + * @cmd_lock: lock for synchronization of access to cmd_list. + * @prop_list: property informations of current ipp driver. + * @check_property: check property about format, size, buffer. + * @reset: reset ipp block. + * @start: ipp each device start. + * @stop: ipp each device stop. + * @sched_event: work schedule handler. 
+ */ +struct exynos_drm_ippdrv { +	struct list_head	drv_list; +	struct device	*parent_dev; +	struct device	*dev; +	struct drm_device	*drm_dev; +	bool	dedicated; +	struct exynos_drm_ipp_ops	*ops[EXYNOS_DRM_OPS_MAX]; +	struct workqueue_struct	*event_workq; +	struct drm_exynos_ipp_cmd_node *c_node; +	struct list_head	cmd_list; +	struct mutex	cmd_lock; +	struct drm_exynos_ipp_prop_list prop_list; + +	int (*check_property)(struct device *dev, +		struct drm_exynos_ipp_property *property); +	int (*reset)(struct device *dev); +	int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd); +	void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd); +	void (*sched_event)(struct work_struct *work); +}; + +#ifdef CONFIG_DRM_EXYNOS_IPP +extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv); +extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv); +extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, +					 struct drm_file *file); +extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, +					 struct drm_file *file); +extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, +					 struct drm_file *file); +extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, +					 struct drm_file *file); +extern int exynos_drm_ippnb_register(struct notifier_block *nb); +extern int exynos_drm_ippnb_unregister(struct notifier_block *nb); +extern int exynos_drm_ippnb_send_event(unsigned long val, void *v); +extern void ipp_sched_cmd(struct work_struct *work); +extern void ipp_sched_event(struct work_struct *work); + +#else +static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) +{ +	return -ENODEV; +} + +static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) +{ +	return -ENODEV; +} + +static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev, +						void *data, +						struct drm_file *file_priv) +{ +	
return -ENOTTY; +} + +static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev, +						void *data, +						struct drm_file *file_priv) +{ +	return -ENOTTY; +} + +static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, +						void *data, +						struct drm_file *file) +{ +	return -ENOTTY; +} + +static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, +						void *data, +						struct drm_file *file) +{ +	return -ENOTTY; +} + +static inline int exynos_drm_ippnb_register(struct notifier_block *nb) +{ +	return -ENODEV; +} + +static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb) +{ +	return -ENODEV; +} + +static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v) +{ +	return -ENOTTY; +} +#endif + +#endif /* _EXYNOS_DRM_IPP_H_ */ + diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c new file mode 100644 index 00000000000..8371cbd7631 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -0,0 +1,281 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co.Ltd + * Authors: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ * + */ + +#include <drm/drmP.h> + +#include <drm/exynos_drm.h> +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_plane.h" + +#define to_exynos_plane(x)	container_of(x, struct exynos_plane, base) + +struct exynos_plane { +	struct drm_plane		base; +	struct exynos_drm_overlay	overlay; +	bool				enabled; +}; + +static const uint32_t formats[] = { +	DRM_FORMAT_XRGB8888, +	DRM_FORMAT_ARGB8888, +	DRM_FORMAT_NV12, +	DRM_FORMAT_NV12MT, +}; + +/* + * This function is to get X or Y size shown via screen. This needs length and + * start position of CRTC. + * + *      <--- length ---> + * CRTC ---------------- + *      ^ start        ^ end + * + * There are six cases from a to f. + * + *             <----- SCREEN -----> + *             0                 last + *   ----------|------------------|---------- + * CRTCs + * a ------- + *        b ------- + *        c -------------------------- + *                 d -------- + *                           e ------- + *                                  f ------- + */ +static int exynos_plane_get_size(int start, unsigned length, unsigned last) +{ +	int end = start + length; +	int size = 0; + +	if (start <= 0) { +		if (end > 0) +			size = min_t(unsigned, end, last); +	} else if (start <= last) { +		size = min_t(unsigned, last - start, length); +	} + +	return size; +} + +int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, +			  struct drm_framebuffer *fb, int crtc_x, int crtc_y, +			  unsigned int crtc_w, unsigned int crtc_h, +			  uint32_t src_x, uint32_t src_y, +			  uint32_t src_w, uint32_t src_h) +{ +	struct exynos_plane *exynos_plane = to_exynos_plane(plane); +	struct exynos_drm_overlay *overlay = &exynos_plane->overlay; +	unsigned int actual_w; +	unsigned int actual_h; +	int nr; +	int i; + +	nr = exynos_drm_fb_get_buf_cnt(fb); +	for (i = 0; i < nr; i++) { +		struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); + 
+		if (!buffer) { +			DRM_DEBUG_KMS("buffer is null\n"); +			return -EFAULT; +		} + +		overlay->dma_addr[i] = buffer->dma_addr; + +		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n", +				i, (unsigned long)overlay->dma_addr[i]); +	} + +	actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); +	actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay); + +	if (crtc_x < 0) { +		if (actual_w) +			src_x -= crtc_x; +		crtc_x = 0; +	} + +	if (crtc_y < 0) { +		if (actual_h) +			src_y -= crtc_y; +		crtc_y = 0; +	} + +	/* set drm framebuffer data. */ +	overlay->fb_x = src_x; +	overlay->fb_y = src_y; +	overlay->fb_width = fb->width; +	overlay->fb_height = fb->height; +	overlay->src_width = src_w; +	overlay->src_height = src_h; +	overlay->bpp = fb->bits_per_pixel; +	overlay->pitch = fb->pitches[0]; +	overlay->pixel_format = fb->pixel_format; + +	/* set overlay range to be displayed. */ +	overlay->crtc_x = crtc_x; +	overlay->crtc_y = crtc_y; +	overlay->crtc_width = actual_w; +	overlay->crtc_height = actual_h; + +	/* set drm mode data. 
*/ +	overlay->mode_width = crtc->mode.hdisplay; +	overlay->mode_height = crtc->mode.vdisplay; +	overlay->refresh = crtc->mode.vrefresh; +	overlay->scan_flag = crtc->mode.flags; + +	DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)", +			overlay->crtc_x, overlay->crtc_y, +			overlay->crtc_width, overlay->crtc_height); + +	exynos_drm_crtc_plane_mode_set(crtc, overlay); + +	return 0; +} + +void exynos_plane_commit(struct drm_plane *plane) +{ +	struct exynos_plane *exynos_plane = to_exynos_plane(plane); +	struct exynos_drm_overlay *overlay = &exynos_plane->overlay; + +	exynos_drm_crtc_plane_commit(plane->crtc, overlay->zpos); +} + +void exynos_plane_dpms(struct drm_plane *plane, int mode) +{ +	struct exynos_plane *exynos_plane = to_exynos_plane(plane); +	struct exynos_drm_overlay *overlay = &exynos_plane->overlay; + +	if (mode == DRM_MODE_DPMS_ON) { +		if (exynos_plane->enabled) +			return; + +		exynos_drm_crtc_plane_enable(plane->crtc, overlay->zpos); +		exynos_plane->enabled = true; +	} else { +		if (!exynos_plane->enabled) +			return; + +		exynos_drm_crtc_plane_disable(plane->crtc, overlay->zpos); +		exynos_plane->enabled = false; +	} +} + +static int +exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, +		     struct drm_framebuffer *fb, int crtc_x, int crtc_y, +		     unsigned int crtc_w, unsigned int crtc_h, +		     uint32_t src_x, uint32_t src_y, +		     uint32_t src_w, uint32_t src_h) +{ +	int ret; + +	ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, +			crtc_w, crtc_h, src_x >> 16, src_y >> 16, +			src_w >> 16, src_h >> 16); +	if (ret < 0) +		return ret; + +	plane->crtc = crtc; + +	exynos_plane_commit(plane); +	exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); + +	return 0; +} + +static int exynos_disable_plane(struct drm_plane *plane) +{ +	exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF); + +	return 0; +} + +static void exynos_plane_destroy(struct drm_plane *plane) +{ +	struct exynos_plane *exynos_plane = to_exynos_plane(plane); 
+ +	exynos_disable_plane(plane); +	drm_plane_cleanup(plane); +	kfree(exynos_plane); +} + +static int exynos_plane_set_property(struct drm_plane *plane, +				     struct drm_property *property, +				     uint64_t val) +{ +	struct drm_device *dev = plane->dev; +	struct exynos_plane *exynos_plane = to_exynos_plane(plane); +	struct exynos_drm_private *dev_priv = dev->dev_private; + +	if (property == dev_priv->plane_zpos_property) { +		exynos_plane->overlay.zpos = val; +		return 0; +	} + +	return -EINVAL; +} + +static struct drm_plane_funcs exynos_plane_funcs = { +	.update_plane	= exynos_update_plane, +	.disable_plane	= exynos_disable_plane, +	.destroy	= exynos_plane_destroy, +	.set_property	= exynos_plane_set_property, +}; + +static void exynos_plane_attach_zpos_property(struct drm_plane *plane) +{ +	struct drm_device *dev = plane->dev; +	struct exynos_drm_private *dev_priv = dev->dev_private; +	struct drm_property *prop; + +	prop = dev_priv->plane_zpos_property; +	if (!prop) { +		prop = drm_property_create_range(dev, 0, "zpos", 0, +						 MAX_PLANE - 1); +		if (!prop) +			return; + +		dev_priv->plane_zpos_property = prop; +	} + +	drm_object_attach_property(&plane->base, prop, 0); +} + +struct drm_plane *exynos_plane_init(struct drm_device *dev, +				    unsigned long possible_crtcs, bool priv) +{ +	struct exynos_plane *exynos_plane; +	int err; + +	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); +	if (!exynos_plane) +		return NULL; + +	err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, +			      &exynos_plane_funcs, formats, ARRAY_SIZE(formats), +			      priv); +	if (err) { +		DRM_ERROR("failed to initialize plane\n"); +		kfree(exynos_plane); +		return NULL; +	} + +	if (priv) +		exynos_plane->overlay.zpos = DEFAULT_ZPOS; +	else +		exynos_plane_attach_zpos_property(&exynos_plane->base); + +	return &exynos_plane->base; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h new file mode 100644 
index 00000000000..84d464c90d3 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co.Ltd + * Authors: Joonyoung Shim <jy0922.shim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ + +int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, +			  struct drm_framebuffer *fb, int crtc_x, int crtc_y, +			  unsigned int crtc_w, unsigned int crtc_h, +			  uint32_t src_x, uint32_t src_y, +			  uint32_t src_w, uint32_t src_h); +void exynos_plane_commit(struct drm_plane *plane); +void exynos_plane_dpms(struct drm_plane *plane, int mode); +struct drm_plane *exynos_plane_init(struct drm_device *dev, +				    unsigned long possible_crtcs, bool priv); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c new file mode 100644 index 00000000000..f01fbb6dc1f --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -0,0 +1,856 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: + *	YoungJun Cho <yj44.cho@samsung.com> + *	Eunchul Kim <chulspro.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundationr + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> + +#include <drm/drmP.h> +#include <drm/exynos_drm.h> +#include "regs-rotator.h" +#include "exynos_drm.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_ipp.h" + +/* + * Rotator supports image crop/rotator and input/output DMA 
operations. + * input DMA reads image data from the memory. + * output DMA writes image data to memory. + * + * M2M operation : supports crop/scale/rotation/csc so on. + * Memory ----> Rotator H/W ----> Memory. + */ + +/* + * TODO + * 1. check suspend/resume api if needed. + * 2. need to check use case platform_device_id. + * 3. check src/dst size with, height. + * 4. need to add supported list in prop_list. + */ + +#define get_rot_context(dev)	platform_get_drvdata(to_platform_device(dev)) +#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\ +					struct rot_context, ippdrv); +#define rot_read(offset)		readl(rot->regs + (offset)) +#define rot_write(cfg, offset)	writel(cfg, rot->regs + (offset)) + +enum rot_irq_status { +	ROT_IRQ_STATUS_COMPLETE	= 8, +	ROT_IRQ_STATUS_ILLEGAL	= 9, +}; + +/* + * A structure of limitation. + * + * @min_w: minimum width. + * @min_h: minimum height. + * @max_w: maximum width. + * @max_h: maximum height. + * @align: align size. + */ +struct rot_limit { +	u32	min_w; +	u32	min_h; +	u32	max_w; +	u32	max_h; +	u32	align; +}; + +/* + * A structure of limitation table. + * + * @ycbcr420_2p: case of YUV. + * @rgb888: case of RGB. + */ +struct rot_limit_table { +	struct rot_limit	ycbcr420_2p; +	struct rot_limit	rgb888; +}; + +/* + * A structure of rotator context. + * @ippdrv: prepare initialization using ippdrv. + * @regs_res: register resources. + * @regs: memory mapped io registers. + * @clock: rotator gate clock. + * @limit_tbl: limitation of rotator. + * @irq: irq number. + * @cur_buf_id: current operation buffer id. + * @suspended: suspended state. 
+ */ +struct rot_context { +	struct exynos_drm_ippdrv	ippdrv; +	struct resource	*regs_res; +	void __iomem	*regs; +	struct clk	*clock; +	struct rot_limit_table	*limit_tbl; +	int	irq; +	int	cur_buf_id[EXYNOS_DRM_OPS_MAX]; +	bool	suspended; +}; + +static void rotator_reg_set_irq(struct rot_context *rot, bool enable) +{ +	u32 val = rot_read(ROT_CONFIG); + +	if (enable == true) +		val |= ROT_CONFIG_IRQ; +	else +		val &= ~ROT_CONFIG_IRQ; + +	rot_write(val, ROT_CONFIG); +} + +static u32 rotator_reg_get_fmt(struct rot_context *rot) +{ +	u32 val = rot_read(ROT_CONTROL); + +	val &= ROT_CONTROL_FMT_MASK; + +	return val; +} + +static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot) +{ +	u32 val = rot_read(ROT_STATUS); + +	val = ROT_STATUS_IRQ(val); + +	if (val == ROT_STATUS_IRQ_VAL_COMPLETE) +		return ROT_IRQ_STATUS_COMPLETE; + +	return ROT_IRQ_STATUS_ILLEGAL; +} + +static irqreturn_t rotator_irq_handler(int irq, void *arg) +{ +	struct rot_context *rot = arg; +	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; +	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; +	struct drm_exynos_ipp_event_work *event_work = c_node->event_work; +	enum rot_irq_status irq_status; +	u32 val; + +	/* Get execution result */ +	irq_status = rotator_reg_get_irq_status(rot); + +	/* clear status */ +	val = rot_read(ROT_STATUS); +	val |= ROT_STATUS_IRQ_PENDING((u32)irq_status); +	rot_write(val, ROT_STATUS); + +	if (irq_status == ROT_IRQ_STATUS_COMPLETE) { +		event_work->ippdrv = ippdrv; +		event_work->buf_id[EXYNOS_DRM_OPS_DST] = +			rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; +		queue_work(ippdrv->event_workq, +			(struct work_struct *)event_work); +	} else { +		DRM_ERROR("the SFR is set illegally\n"); +	} + +	return IRQ_HANDLED; +} + +static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize, +		u32 *vsize) +{ +	struct rot_limit_table *limit_tbl = rot->limit_tbl; +	struct rot_limit *limit; +	u32 mask, val; + +	/* Get size limit */ +	if (fmt == 
ROT_CONTROL_FMT_RGB888) +		limit = &limit_tbl->rgb888; +	else +		limit = &limit_tbl->ycbcr420_2p; + +	/* Get mask for rounding to nearest aligned val */ +	mask = ~((1 << limit->align) - 1); + +	/* Set aligned width */ +	val = ROT_ALIGN(*hsize, limit->align, mask); +	if (val < limit->min_w) +		*hsize = ROT_MIN(limit->min_w, mask); +	else if (val > limit->max_w) +		*hsize = ROT_MAX(limit->max_w, mask); +	else +		*hsize = val; + +	/* Set aligned height */ +	val = ROT_ALIGN(*vsize, limit->align, mask); +	if (val < limit->min_h) +		*vsize = ROT_MIN(limit->min_h, mask); +	else if (val > limit->max_h) +		*vsize = ROT_MAX(limit->max_h, mask); +	else +		*vsize = val; +} + +static int rotator_src_set_fmt(struct device *dev, u32 fmt) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	u32 val; + +	val = rot_read(ROT_CONTROL); +	val &= ~ROT_CONTROL_FMT_MASK; + +	switch (fmt) { +	case DRM_FORMAT_NV12: +		val |= ROT_CONTROL_FMT_YCBCR420_2P; +		break; +	case DRM_FORMAT_XRGB8888: +		val |= ROT_CONTROL_FMT_RGB888; +		break; +	default: +		DRM_ERROR("invalid image format\n"); +		return -EINVAL; +	} + +	rot_write(val, ROT_CONTROL); + +	return 0; +} + +static inline bool rotator_check_reg_fmt(u32 fmt) +{ +	if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) || +	    (fmt == ROT_CONTROL_FMT_RGB888)) +		return true; + +	return false; +} + +static int rotator_src_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, +		struct drm_exynos_sz *sz) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	u32 fmt, hsize, vsize; +	u32 val; + +	/* Get format */ +	fmt = rotator_reg_get_fmt(rot); +	if (!rotator_check_reg_fmt(fmt)) { +		DRM_ERROR("invalid format.\n"); +		return -EINVAL; +	} + +	/* Align buffer size */ +	hsize = sz->hsize; +	vsize = sz->vsize; +	rotator_align_size(rot, fmt, &hsize, &vsize); + +	/* Set buffer size configuration */ +	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); +	rot_write(val, ROT_SRC_BUF_SIZE); + +	/* Set crop image position configuration */ +	
val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); +	rot_write(val, ROT_SRC_CROP_POS); +	val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w); +	rot_write(val, ROT_SRC_CROP_SIZE); + +	return 0; +} + +static int rotator_src_set_addr(struct device *dev, +		struct drm_exynos_ipp_buf_info *buf_info, +		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; +	u32 val, fmt, hsize, vsize; +	int i; + +	/* Set current buf_id */ +	rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id; + +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		/* Set address configuration */ +		for_each_ipp_planar(i) +			addr[i] = buf_info->base[i]; + +		/* Get format */ +		fmt = rotator_reg_get_fmt(rot); +		if (!rotator_check_reg_fmt(fmt)) { +			DRM_ERROR("invalid format.\n"); +			return -EINVAL; +		} + +		/* Re-set cb planar for NV12 format */ +		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && +		    !addr[EXYNOS_DRM_PLANAR_CB]) { + +			val = rot_read(ROT_SRC_BUF_SIZE); +			hsize = ROT_GET_BUF_SIZE_W(val); +			vsize = ROT_GET_BUF_SIZE_H(val); + +			/* Set cb planar */ +			addr[EXYNOS_DRM_PLANAR_CB] = +				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; +		} + +		for_each_ipp_planar(i) +			rot_write(addr[i], ROT_SRC_BUF_ADDR(i)); +		break; +	case IPP_BUF_DEQUEUE: +		for_each_ipp_planar(i) +			rot_write(0x0, ROT_SRC_BUF_ADDR(i)); +		break; +	default: +		/* Nothing to do */ +		break; +	} + +	return 0; +} + +static int rotator_dst_set_transf(struct device *dev, +		enum drm_exynos_degree degree, +		enum drm_exynos_flip flip, bool *swap) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	u32 val; + +	/* Set transform configuration */ +	val = rot_read(ROT_CONTROL); +	val &= ~ROT_CONTROL_FLIP_MASK; + +	switch (flip) { +	case EXYNOS_DRM_FLIP_VERTICAL: +		val |= ROT_CONTROL_FLIP_VERTICAL; +		break; +	case EXYNOS_DRM_FLIP_HORIZONTAL: +		val |= ROT_CONTROL_FLIP_HORIZONTAL; +		break; +	default: +		/* Flip None */ +		
break; +	} + +	val &= ~ROT_CONTROL_ROT_MASK; + +	switch (degree) { +	case EXYNOS_DRM_DEGREE_90: +		val |= ROT_CONTROL_ROT_90; +		break; +	case EXYNOS_DRM_DEGREE_180: +		val |= ROT_CONTROL_ROT_180; +		break; +	case EXYNOS_DRM_DEGREE_270: +		val |= ROT_CONTROL_ROT_270; +		break; +	default: +		/* Rotation 0 Degree */ +		break; +	} + +	rot_write(val, ROT_CONTROL); + +	/* Check degree for setting buffer size swap */ +	if ((degree == EXYNOS_DRM_DEGREE_90) || +	    (degree == EXYNOS_DRM_DEGREE_270)) +		*swap = true; +	else +		*swap = false; + +	return 0; +} + +static int rotator_dst_set_size(struct device *dev, int swap, +		struct drm_exynos_pos *pos, +		struct drm_exynos_sz *sz) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	u32 val, fmt, hsize, vsize; + +	/* Get format */ +	fmt = rotator_reg_get_fmt(rot); +	if (!rotator_check_reg_fmt(fmt)) { +		DRM_ERROR("invalid format.\n"); +		return -EINVAL; +	} + +	/* Align buffer size */ +	hsize = sz->hsize; +	vsize = sz->vsize; +	rotator_align_size(rot, fmt, &hsize, &vsize); + +	/* Set buffer size configuration */ +	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); +	rot_write(val, ROT_DST_BUF_SIZE); + +	/* Set crop image position configuration */ +	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); +	rot_write(val, ROT_DST_CROP_POS); + +	return 0; +} + +static int rotator_dst_set_addr(struct device *dev, +		struct drm_exynos_ipp_buf_info *buf_info, +		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; +	u32 val, fmt, hsize, vsize; +	int i; + +	/* Set current buf_id */ +	rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id; + +	switch (buf_type) { +	case IPP_BUF_ENQUEUE: +		/* Set address configuration */ +		for_each_ipp_planar(i) +			addr[i] = buf_info->base[i]; + +		/* Get format */ +		fmt = rotator_reg_get_fmt(rot); +		if (!rotator_check_reg_fmt(fmt)) { +			DRM_ERROR("invalid format.\n"); +			return -EINVAL; +		} + +		
/* Re-set cb planar for NV12 format */ +		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && +		    !addr[EXYNOS_DRM_PLANAR_CB]) { +			/* Get buf size */ +			val = rot_read(ROT_DST_BUF_SIZE); + +			hsize = ROT_GET_BUF_SIZE_W(val); +			vsize = ROT_GET_BUF_SIZE_H(val); + +			/* Set cb planar */ +			addr[EXYNOS_DRM_PLANAR_CB] = +				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; +		} + +		for_each_ipp_planar(i) +			rot_write(addr[i], ROT_DST_BUF_ADDR(i)); +		break; +	case IPP_BUF_DEQUEUE: +		for_each_ipp_planar(i) +			rot_write(0x0, ROT_DST_BUF_ADDR(i)); +		break; +	default: +		/* Nothing to do */ +		break; +	} + +	return 0; +} + +static struct exynos_drm_ipp_ops rot_src_ops = { +	.set_fmt	=	rotator_src_set_fmt, +	.set_size	=	rotator_src_set_size, +	.set_addr	=	rotator_src_set_addr, +}; + +static struct exynos_drm_ipp_ops rot_dst_ops = { +	.set_transf	=	rotator_dst_set_transf, +	.set_size	=	rotator_dst_set_size, +	.set_addr	=	rotator_dst_set_addr, +}; + +static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) +{ +	struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; + +	prop_list->version = 1; +	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | +				(1 << EXYNOS_DRM_FLIP_HORIZONTAL); +	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | +				(1 << EXYNOS_DRM_DEGREE_90) | +				(1 << EXYNOS_DRM_DEGREE_180) | +				(1 << EXYNOS_DRM_DEGREE_270); +	prop_list->csc = 0; +	prop_list->crop = 0; +	prop_list->scale = 0; + +	return 0; +} + +static inline bool rotator_check_drm_fmt(u32 fmt) +{ +	switch (fmt) { +	case DRM_FORMAT_XRGB8888: +	case DRM_FORMAT_NV12: +		return true; +	default: +		DRM_DEBUG_KMS("not support format\n"); +		return false; +	} +} + +static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip) +{ +	switch (flip) { +	case EXYNOS_DRM_FLIP_NONE: +	case EXYNOS_DRM_FLIP_VERTICAL: +	case EXYNOS_DRM_FLIP_HORIZONTAL: +	case EXYNOS_DRM_FLIP_BOTH: +		return true; +	default: +		DRM_DEBUG_KMS("invalid flip\n"); +		return false; +	} +} + +static int 
rotator_ippdrv_check_property(struct device *dev, +		struct drm_exynos_ipp_property *property) +{ +	struct drm_exynos_ipp_config *src_config = +					&property->config[EXYNOS_DRM_OPS_SRC]; +	struct drm_exynos_ipp_config *dst_config = +					&property->config[EXYNOS_DRM_OPS_DST]; +	struct drm_exynos_pos *src_pos = &src_config->pos; +	struct drm_exynos_pos *dst_pos = &dst_config->pos; +	struct drm_exynos_sz *src_sz = &src_config->sz; +	struct drm_exynos_sz *dst_sz = &dst_config->sz; +	bool swap = false; + +	/* Check format configuration */ +	if (src_config->fmt != dst_config->fmt) { +		DRM_DEBUG_KMS("not support csc feature\n"); +		return -EINVAL; +	} + +	if (!rotator_check_drm_fmt(dst_config->fmt)) { +		DRM_DEBUG_KMS("invalid format\n"); +		return -EINVAL; +	} + +	/* Check transform configuration */ +	if (src_config->degree != EXYNOS_DRM_DEGREE_0) { +		DRM_DEBUG_KMS("not support source-side rotation\n"); +		return -EINVAL; +	} + +	switch (dst_config->degree) { +	case EXYNOS_DRM_DEGREE_90: +	case EXYNOS_DRM_DEGREE_270: +		swap = true; +	case EXYNOS_DRM_DEGREE_0: +	case EXYNOS_DRM_DEGREE_180: +		/* No problem */ +		break; +	default: +		DRM_DEBUG_KMS("invalid degree\n"); +		return -EINVAL; +	} + +	if (src_config->flip != EXYNOS_DRM_FLIP_NONE) { +		DRM_DEBUG_KMS("not support source-side flip\n"); +		return -EINVAL; +	} + +	if (!rotator_check_drm_flip(dst_config->flip)) { +		DRM_DEBUG_KMS("invalid flip\n"); +		return -EINVAL; +	} + +	/* Check size configuration */ +	if ((src_pos->x + src_pos->w > src_sz->hsize) || +		(src_pos->y + src_pos->h > src_sz->vsize)) { +		DRM_DEBUG_KMS("out of source buffer bound\n"); +		return -EINVAL; +	} + +	if (swap) { +		if ((dst_pos->x + dst_pos->h > dst_sz->vsize) || +			(dst_pos->y + dst_pos->w > dst_sz->hsize)) { +			DRM_DEBUG_KMS("out of destination buffer bound\n"); +			return -EINVAL; +		} + +		if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) { +			DRM_DEBUG_KMS("not support scale feature\n"); +			return -EINVAL; +		} +	} 
else { +		if ((dst_pos->x + dst_pos->w > dst_sz->hsize) || +			(dst_pos->y + dst_pos->h > dst_sz->vsize)) { +			DRM_DEBUG_KMS("out of destination buffer bound\n"); +			return -EINVAL; +		} + +		if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) { +			DRM_DEBUG_KMS("not support scale feature\n"); +			return -EINVAL; +		} +	} + +	return 0; +} + +static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) +{ +	struct rot_context *rot = dev_get_drvdata(dev); +	u32 val; + +	if (rot->suspended) { +		DRM_ERROR("suspended state\n"); +		return -EPERM; +	} + +	if (cmd != IPP_CMD_M2M) { +		DRM_ERROR("not support cmd: %d\n", cmd); +		return -EINVAL; +	} + +	/* Set interrupt enable */ +	rotator_reg_set_irq(rot, true); + +	val = rot_read(ROT_CONTROL); +	val |= ROT_CONTROL_START; + +	rot_write(val, ROT_CONTROL); + +	return 0; +} + +static struct rot_limit_table rot_limit_tbl_4210 = { +	.ycbcr420_2p = { +		.min_w = 32, +		.min_h = 32, +		.max_w = SZ_64K, +		.max_h = SZ_64K, +		.align = 3, +	}, +	.rgb888 = { +		.min_w = 8, +		.min_h = 8, +		.max_w = SZ_16K, +		.max_h = SZ_16K, +		.align = 2, +	}, +}; + +static struct rot_limit_table rot_limit_tbl_4x12 = { +	.ycbcr420_2p = { +		.min_w = 32, +		.min_h = 32, +		.max_w = SZ_32K, +		.max_h = SZ_32K, +		.align = 3, +	}, +	.rgb888 = { +		.min_w = 8, +		.min_h = 8, +		.max_w = SZ_8K, +		.max_h = SZ_8K, +		.align = 2, +	}, +}; + +static struct rot_limit_table rot_limit_tbl_5250 = { +	.ycbcr420_2p = { +		.min_w = 32, +		.min_h = 32, +		.max_w = SZ_32K, +		.max_h = SZ_32K, +		.align = 3, +	}, +	.rgb888 = { +		.min_w = 8, +		.min_h = 8, +		.max_w = SZ_8K, +		.max_h = SZ_8K, +		.align = 1, +	}, +}; + +static const struct of_device_id exynos_rotator_match[] = { +	{ +		.compatible = "samsung,exynos4210-rotator", +		.data = &rot_limit_tbl_4210, +	}, +	{ +		.compatible = "samsung,exynos4212-rotator", +		.data = &rot_limit_tbl_4x12, +	}, +	{ +		.compatible = "samsung,exynos5250-rotator", +		.data = 
&rot_limit_tbl_5250, +	}, +	{}, +}; + +static int rotator_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct rot_context *rot; +	struct exynos_drm_ippdrv *ippdrv; +	const struct of_device_id *match; +	int ret; + +	if (!dev->of_node) { +		dev_err(dev, "cannot find of_node.\n"); +		return -ENODEV; +	} + +	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); +	if (!rot) +		return -ENOMEM; + +	match = of_match_node(exynos_rotator_match, dev->of_node); +	if (!match) { +		dev_err(dev, "failed to match node\n"); +		return -ENODEV; +	} +	rot->limit_tbl = (struct rot_limit_table *)match->data; + +	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	rot->regs = devm_ioremap_resource(dev, rot->regs_res); +	if (IS_ERR(rot->regs)) +		return PTR_ERR(rot->regs); + +	rot->irq = platform_get_irq(pdev, 0); +	if (rot->irq < 0) { +		dev_err(dev, "failed to get irq\n"); +		return rot->irq; +	} + +	ret = devm_request_threaded_irq(dev, rot->irq, NULL, +			rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); +	if (ret < 0) { +		dev_err(dev, "failed to request irq\n"); +		return ret; +	} + +	rot->clock = devm_clk_get(dev, "rotator"); +	if (IS_ERR(rot->clock)) { +		dev_err(dev, "failed to get clock\n"); +		return PTR_ERR(rot->clock); +	} + +	pm_runtime_enable(dev); + +	ippdrv = &rot->ippdrv; +	ippdrv->dev = dev; +	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops; +	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops; +	ippdrv->check_property = rotator_ippdrv_check_property; +	ippdrv->start = rotator_ippdrv_start; +	ret = rotator_init_prop_list(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to init property list.\n"); +		goto err_ippdrv_register; +	} + +	DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); + +	platform_set_drvdata(pdev, rot); + +	ret = exynos_drm_ippdrv_register(ippdrv); +	if (ret < 0) { +		dev_err(dev, "failed to register drm rotator device\n"); +		goto err_ippdrv_register; +	} + +	dev_info(dev, "The exynos rotator is probed 
successfully\n"); + +	return 0; + +err_ippdrv_register: +	pm_runtime_disable(dev); +	return ret; +} + +static int rotator_remove(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct rot_context *rot = dev_get_drvdata(dev); +	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; + +	exynos_drm_ippdrv_unregister(ippdrv); + +	pm_runtime_disable(dev); + +	return 0; +} + +static int rotator_clk_crtl(struct rot_context *rot, bool enable) +{ +	if (enable) { +		clk_enable(rot->clock); +		rot->suspended = false; +	} else { +		clk_disable(rot->clock); +		rot->suspended = true; +	} + +	return 0; +} + + +#ifdef CONFIG_PM_SLEEP +static int rotator_suspend(struct device *dev) +{ +	struct rot_context *rot = dev_get_drvdata(dev); + +	if (pm_runtime_suspended(dev)) +		return 0; + +	return rotator_clk_crtl(rot, false); +} + +static int rotator_resume(struct device *dev) +{ +	struct rot_context *rot = dev_get_drvdata(dev); + +	if (!pm_runtime_suspended(dev)) +		return rotator_clk_crtl(rot, true); + +	return 0; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int rotator_runtime_suspend(struct device *dev) +{ +	struct rot_context *rot = dev_get_drvdata(dev); + +	return  rotator_clk_crtl(rot, false); +} + +static int rotator_runtime_resume(struct device *dev) +{ +	struct rot_context *rot = dev_get_drvdata(dev); + +	return  rotator_clk_crtl(rot, true); +} +#endif + +static const struct dev_pm_ops rotator_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume) +	SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume, +									NULL) +}; + +struct platform_driver rotator_driver = { +	.probe		= rotator_probe, +	.remove		= rotator_remove, +	.driver		= { +		.name	= "exynos-rot", +		.owner	= THIS_MODULE, +		.pm	= &rotator_pm_ops, +		.of_match_table = exynos_rotator_match, +	}, +}; diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h new file mode 100644 index 00000000000..71a0b4c0c1e --- /dev/null +++ 
b/drivers/gpu/drm/exynos/exynos_drm_rotator.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * + * Authors: + *	YoungJun Cho <yj44.cho@samsung.com> + *	Eunchul Kim <chulspro.kim@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + */ + +#ifndef	_EXYNOS_DRM_ROTATOR_H_ +#define	_EXYNOS_DRM_ROTATOR_H_ + +/* TODO */ + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c new file mode 100644 index 00000000000..2fb8705d646 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -0,0 +1,702 @@ +/* exynos_drm_vidi.c + * + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * Authors: + *	Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ +#include <drm/drmP.h> + +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include <drm/exynos_drm.h> + +#include <drm/drm_edid.h> +#include <drm/drm_crtc_helper.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_encoder.h" +#include "exynos_drm_vidi.h" + +/* vidi has totally three virtual windows. 
*/ +#define WINDOWS_NR		3 + +#define get_vidi_mgr(dev)	platform_get_drvdata(to_platform_device(dev)) +#define ctx_from_connector(c)	container_of(c, struct vidi_context, \ +					connector) + +struct vidi_win_data { +	unsigned int		offset_x; +	unsigned int		offset_y; +	unsigned int		ovl_width; +	unsigned int		ovl_height; +	unsigned int		fb_width; +	unsigned int		fb_height; +	unsigned int		bpp; +	dma_addr_t		dma_addr; +	unsigned int		buf_offsize; +	unsigned int		line_size;	/* bytes */ +	bool			enabled; +}; + +struct vidi_context { +	struct drm_device		*drm_dev; +	struct drm_crtc			*crtc; +	struct drm_encoder		*encoder; +	struct drm_connector		connector; +	struct exynos_drm_subdrv	subdrv; +	struct vidi_win_data		win_data[WINDOWS_NR]; +	struct edid			*raw_edid; +	unsigned int			clkdiv; +	unsigned int			default_win; +	unsigned long			irq_flags; +	unsigned int			connected; +	bool				vblank_on; +	bool				suspended; +	bool				direct_vblank; +	struct work_struct		work; +	struct mutex			lock; +	int				pipe; +}; + +static const char fake_edid_info[] = { +	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05, +	0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78, +	0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd, +	0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, +	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00, +	0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, +	0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, +	0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, +	0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, +	0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47, +	0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1, +	0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83, +	0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 
0x0c, 0x00, 0x10, 0x00, +	0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c, +	0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a, +	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00, +	0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c, +	0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, +	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +	0x00, 0x00, 0x00, 0x06 +}; + +static void vidi_apply(struct exynos_drm_manager *mgr) +{ +	struct vidi_context *ctx = mgr->ctx; +	struct exynos_drm_manager_ops *mgr_ops = mgr->ops; +	struct vidi_win_data *win_data; +	int i; + +	for (i = 0; i < WINDOWS_NR; i++) { +		win_data = &ctx->win_data[i]; +		if (win_data->enabled && (mgr_ops && mgr_ops->win_commit)) +			mgr_ops->win_commit(mgr, i); +	} + +	if (mgr_ops && mgr_ops->commit) +		mgr_ops->commit(mgr); +} + +static void vidi_commit(struct exynos_drm_manager *mgr) +{ +	struct vidi_context *ctx = mgr->ctx; + +	if (ctx->suspended) +		return; +} + +static int vidi_enable_vblank(struct exynos_drm_manager *mgr) +{ +	struct vidi_context *ctx = mgr->ctx; + +	if (ctx->suspended) +		return -EPERM; + +	if (!test_and_set_bit(0, &ctx->irq_flags)) +		ctx->vblank_on = true; + +	ctx->direct_vblank = true; + +	/* +	 * in case of page flip request, vidi_finish_pageflip function +	 * will not be called because direct_vblank is true and then +	 * that function will be called by manager_ops->win_commit callback +	 */ +	schedule_work(&ctx->work); + +	return 0; +} + +static void vidi_disable_vblank(struct exynos_drm_manager *mgr) +{ +	struct vidi_context *ctx = mgr->ctx; + +	if (ctx->suspended) +		return; + +	if (test_and_clear_bit(0, &ctx->irq_flags)) +		ctx->vblank_on = false; +} + +static void vidi_win_mode_set(struct 
exynos_drm_manager *mgr, +			struct exynos_drm_overlay *overlay) +{ +	struct vidi_context *ctx = mgr->ctx; +	struct vidi_win_data *win_data; +	int win; +	unsigned long offset; + +	if (!overlay) { +		DRM_ERROR("overlay is NULL\n"); +		return; +	} + +	win = overlay->zpos; +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	offset = overlay->fb_x * (overlay->bpp >> 3); +	offset += overlay->fb_y * overlay->pitch; + +	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch); + +	win_data = &ctx->win_data[win]; + +	win_data->offset_x = overlay->crtc_x; +	win_data->offset_y = overlay->crtc_y; +	win_data->ovl_width = overlay->crtc_width; +	win_data->ovl_height = overlay->crtc_height; +	win_data->fb_width = overlay->fb_width; +	win_data->fb_height = overlay->fb_height; +	win_data->dma_addr = overlay->dma_addr[0] + offset; +	win_data->bpp = overlay->bpp; +	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * +				(overlay->bpp >> 3); +	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3); + +	/* +	 * some parts of win_data should be transferred to user side +	 * through specific ioctl. 
+	 */ + +	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", +			win_data->offset_x, win_data->offset_y); +	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", +			win_data->ovl_width, win_data->ovl_height); +	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr); +	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", +			overlay->fb_width, overlay->crtc_width); +} + +static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) +{ +	struct vidi_context *ctx = mgr->ctx; +	struct vidi_win_data *win_data; +	int win = zpos; + +	if (ctx->suspended) +		return; + +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	win_data = &ctx->win_data[win]; + +	win_data->enabled = true; + +	DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); + +	if (ctx->vblank_on) +		schedule_work(&ctx->work); +} + +static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos) +{ +	struct vidi_context *ctx = mgr->ctx; +	struct vidi_win_data *win_data; +	int win = zpos; + +	if (win == DEFAULT_ZPOS) +		win = ctx->default_win; + +	if (win < 0 || win >= WINDOWS_NR) +		return; + +	win_data = &ctx->win_data[win]; +	win_data->enabled = false; + +	/* TODO. */ +} + +static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable) +{ +	struct vidi_context *ctx = mgr->ctx; + +	DRM_DEBUG_KMS("%s\n", __FILE__); + +	if (enable != false && enable != true) +		return -EINVAL; + +	if (enable) { +		ctx->suspended = false; + +		/* if vblank was enabled status, enable it again. 
*/ +		if (test_and_clear_bit(0, &ctx->irq_flags)) +			vidi_enable_vblank(mgr); + +		vidi_apply(mgr); +	} else { +		ctx->suspended = true; +	} + +	return 0; +} + +static void vidi_dpms(struct exynos_drm_manager *mgr, int mode) +{ +	struct vidi_context *ctx = mgr->ctx; + +	DRM_DEBUG_KMS("%d\n", mode); + +	mutex_lock(&ctx->lock); + +	switch (mode) { +	case DRM_MODE_DPMS_ON: +		vidi_power_on(mgr, true); +		break; +	case DRM_MODE_DPMS_STANDBY: +	case DRM_MODE_DPMS_SUSPEND: +	case DRM_MODE_DPMS_OFF: +		vidi_power_on(mgr, false); +		break; +	default: +		DRM_DEBUG_KMS("unspecified mode %d\n", mode); +		break; +	} + +	mutex_unlock(&ctx->lock); +} + +static int vidi_mgr_initialize(struct exynos_drm_manager *mgr, +			struct drm_device *drm_dev) +{ +	struct vidi_context *ctx = mgr->ctx; +	struct exynos_drm_private *priv = drm_dev->dev_private; + +	mgr->drm_dev = ctx->drm_dev = drm_dev; +	mgr->pipe = ctx->pipe = priv->pipe++; + +	/* +	 * enable drm irq mode. +	 * - with irq_enabled = 1, we can use the vblank feature. +	 * +	 * P.S. note that we wouldn't use drm irq handler but +	 *	just specific driver own one instead because +	 *	drm framework supports only one irq handler. 
+	 */ +	drm_dev->irq_enabled = 1; + +	/* +	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled +	 * by drm timer once a current process gives up ownership of +	 * vblank event.(after drm_vblank_put function is called) +	 */ +	drm_dev->vblank_disable_allowed = 1; + +	return 0; +} + +static struct exynos_drm_manager_ops vidi_manager_ops = { +	.dpms = vidi_dpms, +	.commit = vidi_commit, +	.enable_vblank = vidi_enable_vblank, +	.disable_vblank = vidi_disable_vblank, +	.win_mode_set = vidi_win_mode_set, +	.win_commit = vidi_win_commit, +	.win_disable = vidi_win_disable, +}; + +static struct exynos_drm_manager vidi_manager = { +	.type = EXYNOS_DISPLAY_TYPE_VIDI, +	.ops = &vidi_manager_ops, +}; + +static void vidi_fake_vblank_handler(struct work_struct *work) +{ +	struct vidi_context *ctx = container_of(work, struct vidi_context, +					work); + +	if (ctx->pipe < 0) +		return; + +	/* refresh rate is about 50Hz. */ +	usleep_range(16000, 20000); + +	mutex_lock(&ctx->lock); + +	if (ctx->direct_vblank) { +		drm_handle_vblank(ctx->drm_dev, ctx->pipe); +		ctx->direct_vblank = false; +		mutex_unlock(&ctx->lock); +		return; +	} + +	mutex_unlock(&ctx->lock); + +	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); +} + +static int vidi_show_connection(struct device *dev, +				struct device_attribute *attr, char *buf) +{ +	int rc; +	struct exynos_drm_manager *mgr = get_vidi_mgr(dev); +	struct vidi_context *ctx = mgr->ctx; + +	mutex_lock(&ctx->lock); + +	rc = sprintf(buf, "%d\n", ctx->connected); + +	mutex_unlock(&ctx->lock); + +	return rc; +} + +static int vidi_store_connection(struct device *dev, +				struct device_attribute *attr, +				const char *buf, size_t len) +{ +	struct exynos_drm_manager *mgr = get_vidi_mgr(dev); +	struct vidi_context *ctx = mgr->ctx; +	int ret; + +	ret = kstrtoint(buf, 0, &ctx->connected); +	if (ret) +		return ret; + +	if (ctx->connected > 1) +		return -EINVAL; + +	/* use fake edid data for test. 
*/ +	if (!ctx->raw_edid) +		ctx->raw_edid = (struct edid *)fake_edid_info; + +	/* if raw_edid isn't same as fake data then it can't be tested. */ +	if (ctx->raw_edid != (struct edid *)fake_edid_info) { +		DRM_DEBUG_KMS("edid data is not fake data.\n"); +		return -EINVAL; +	} + +	DRM_DEBUG_KMS("requested connection.\n"); + +	drm_helper_hpd_irq_event(ctx->drm_dev); + +	return len; +} + +static DEVICE_ATTR(connection, 0644, vidi_show_connection, +			vidi_store_connection); + +int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, +				struct drm_file *file_priv) +{ +	struct vidi_context *ctx = NULL; +	struct drm_encoder *encoder; +	struct exynos_drm_display *display; +	struct drm_exynos_vidi_connection *vidi = data; + +	if (!vidi) { +		DRM_DEBUG_KMS("user data for vidi is null.\n"); +		return -EINVAL; +	} + +	if (vidi->connection > 1) { +		DRM_DEBUG_KMS("connection should be 0 or 1.\n"); +		return -EINVAL; +	} + +	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list, +								head) { +		display = exynos_drm_get_display(encoder); + +		if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) { +			ctx = display->ctx; +			break; +		} +	} + +	if (!ctx) { +		DRM_DEBUG_KMS("not found virtual device type encoder.\n"); +		return -EINVAL; +	} + +	if (ctx->connected == vidi->connection) { +		DRM_DEBUG_KMS("same connection request.\n"); +		return -EINVAL; +	} + +	if (vidi->connection) { +		struct edid *raw_edid  = (struct edid *)(uint32_t)vidi->edid; +		if (!drm_edid_is_valid(raw_edid)) { +			DRM_DEBUG_KMS("edid data is invalid.\n"); +			return -EINVAL; +		} +		ctx->raw_edid = drm_edid_duplicate(raw_edid); +		if (!ctx->raw_edid) { +			DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); +			return -ENOMEM; +		} +	} else { +		/* +		 * with connection = 0, free raw_edid +		 * only if raw edid data isn't same as fake data. 
+		 */ +		if (ctx->raw_edid && ctx->raw_edid != +				(struct edid *)fake_edid_info) { +			kfree(ctx->raw_edid); +			ctx->raw_edid = NULL; +		} +	} + +	ctx->connected = vidi->connection; +	drm_helper_hpd_irq_event(ctx->drm_dev); + +	return 0; +} + +static enum drm_connector_status vidi_detect(struct drm_connector *connector, +			bool force) +{ +	struct vidi_context *ctx = ctx_from_connector(connector); + +	/* +	 * connection request would come from user side +	 * to do hotplug through specific ioctl. +	 */ +	return ctx->connected ? connector_status_connected : +			connector_status_disconnected; +} + +static void vidi_connector_destroy(struct drm_connector *connector) +{ +} + +static struct drm_connector_funcs vidi_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.detect = vidi_detect, +	.destroy = vidi_connector_destroy, +}; + +static int vidi_get_modes(struct drm_connector *connector) +{ +	struct vidi_context *ctx = ctx_from_connector(connector); +	struct edid *edid; +	int edid_len; + +	/* +	 * the edid data comes from user side and it would be set +	 * to ctx->raw_edid through specific ioctl. 
+	 */ +	if (!ctx->raw_edid) { +		DRM_DEBUG_KMS("raw_edid is null.\n"); +		return -EFAULT; +	} + +	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; +	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); +	if (!edid) { +		DRM_DEBUG_KMS("failed to allocate edid\n"); +		return -ENOMEM; +	} + +	drm_mode_connector_update_edid_property(connector, edid); + +	return drm_add_edid_modes(connector, edid); +} + +static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) +{ +	struct vidi_context *ctx = ctx_from_connector(connector); + +	return ctx->encoder; +} + +static struct drm_connector_helper_funcs vidi_connector_helper_funcs = { +	.get_modes = vidi_get_modes, +	.best_encoder = vidi_best_encoder, +}; + +static int vidi_create_connector(struct exynos_drm_display *display, +				struct drm_encoder *encoder) +{ +	struct vidi_context *ctx = display->ctx; +	struct drm_connector *connector = &ctx->connector; +	int ret; + +	ctx->encoder = encoder; +	connector->polled = DRM_CONNECTOR_POLL_HPD; + +	ret = drm_connector_init(ctx->drm_dev, connector, +			&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); +	if (ret) { +		DRM_ERROR("Failed to initialize connector with drm\n"); +		return ret; +	} + +	drm_connector_helper_add(connector, &vidi_connector_helper_funcs); +	drm_sysfs_connector_add(connector); +	drm_mode_connector_attach_encoder(connector, encoder); + +	return 0; +} + + +static struct exynos_drm_display_ops vidi_display_ops = { +	.create_connector = vidi_create_connector, +}; + +static struct exynos_drm_display vidi_display = { +	.type = EXYNOS_DISPLAY_TYPE_VIDI, +	.ops = &vidi_display_ops, +}; + +static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) +{ +	struct exynos_drm_manager *mgr = get_vidi_mgr(dev); +	struct vidi_context *ctx = mgr->ctx; +	struct drm_crtc *crtc = ctx->crtc; +	int ret; + +	vidi_mgr_initialize(mgr, drm_dev); + +	ret = exynos_drm_crtc_create(&vidi_manager); +	if (ret) { +		DRM_ERROR("failed to create 
crtc.\n"); +		return ret; +	} + +	ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display); +	if (ret) { +		crtc->funcs->destroy(crtc); +		DRM_ERROR("failed to create encoder and connector.\n"); +		return ret; +	} + +	return 0; +} + +static int vidi_probe(struct platform_device *pdev) +{ +	struct exynos_drm_subdrv *subdrv; +	struct vidi_context *ctx; +	int ret; + +	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); +	if (!ctx) +		return -ENOMEM; + +	ctx->default_win = 0; + +	INIT_WORK(&ctx->work, vidi_fake_vblank_handler); + +	vidi_manager.ctx = ctx; +	vidi_display.ctx = ctx; + +	mutex_init(&ctx->lock); + +	platform_set_drvdata(pdev, &vidi_manager); + +	subdrv = &ctx->subdrv; +	subdrv->dev = &pdev->dev; +	subdrv->probe = vidi_subdrv_probe; + +	ret = exynos_drm_subdrv_register(subdrv); +	if (ret < 0) { +		dev_err(&pdev->dev, "failed to register drm vidi device\n"); +		return ret; +	} + +	ret = device_create_file(&pdev->dev, &dev_attr_connection); +	if (ret < 0) { +		exynos_drm_subdrv_unregister(subdrv); +		DRM_INFO("failed to create connection sysfs.\n"); +	} + +	return 0; +} + +static int vidi_remove(struct platform_device *pdev) +{ +	struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); +	struct vidi_context *ctx = mgr->ctx; +	struct drm_encoder *encoder = ctx->encoder; +	struct drm_crtc *crtc = mgr->crtc; + +	if (ctx->raw_edid != (struct edid *)fake_edid_info) { +		kfree(ctx->raw_edid); +		ctx->raw_edid = NULL; + +		return -EINVAL; +	} + +	crtc->funcs->destroy(crtc); +	encoder->funcs->destroy(encoder); +	drm_connector_cleanup(&ctx->connector); + +	return 0; +} + +struct platform_driver vidi_driver = { +	.probe		= vidi_probe, +	.remove		= vidi_remove, +	.driver		= { +		.name	= "exynos-drm-vidi", +		.owner	= THIS_MODULE, +	}, +}; + +int exynos_drm_probe_vidi(void) +{ +	struct platform_device *pdev; +	int ret; + +	pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0); +	if (IS_ERR(pdev)) +		return PTR_ERR(pdev); + +	ret = 
platform_driver_register(&vidi_driver); +	if (ret) { +		platform_device_unregister(pdev); +		return ret; +	} + +	return ret; +} + +void exynos_drm_remove_vidi(void) +{ +	struct vidi_context *ctx = vidi_manager.ctx; +	struct exynos_drm_subdrv *subdrv = &ctx->subdrv; +	struct platform_device *pdev = to_platform_device(subdrv->dev); + +	platform_driver_unregister(&vidi_driver); +	platform_device_unregister(pdev); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h new file mode 100644 index 00000000000..1e5fdaa36cc --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h @@ -0,0 +1,22 @@ +/* exynos_drm_vidi.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * Author: Inki Dae <inki.dae@samsung.com> + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. 
+ */ + +#ifndef _EXYNOS_DRM_VIDI_H_ +#define _EXYNOS_DRM_VIDI_H_ + +#ifdef CONFIG_DRM_EXYNOS_VIDI +int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, +				struct drm_file *file_priv); +#else +#define vidi_connection_ioctl	NULL +#endif + +#endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c new file mode 100644 index 00000000000..aa259b0a873 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -0,0 +1,2515 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co.Ltd + * Authors: + * Seung-Woo Kim <sw0312.kim@samsung.com> + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + * + * Based on drivers/media/video/s5p-tv/hdmi_drv.c + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ + +#include <drm/drmP.h> +#include <drm/drm_edid.h> +#include <drm/drm_crtc_helper.h> + +#include "regs-hdmi.h" + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/i2c.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/hdmi.h> +#include <linux/component.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_mixer.h" + +#include <linux/gpio.h> +#include <media/s5p_hdmi.h> + +#define get_hdmi_display(dev)	platform_get_drvdata(to_platform_device(dev)) +#define ctx_from_connector(c)	container_of(c, struct hdmi_context, connector) + +#define 
HOTPLUG_DEBOUNCE_MS		1100 + +/* AVI header and aspect ratio */ +#define HDMI_AVI_VERSION		0x02 +#define HDMI_AVI_LENGTH		0x0D + +/* AUI header info */ +#define HDMI_AUI_VERSION	0x01 +#define HDMI_AUI_LENGTH	0x0A +#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8 +#define AVI_4_3_CENTER_RATIO	0x9 +#define AVI_16_9_CENTER_RATIO	0xa + +enum hdmi_type { +	HDMI_TYPE13, +	HDMI_TYPE14, +}; + +struct hdmi_driver_data { +	unsigned int type; +	const struct hdmiphy_config *phy_confs; +	unsigned int phy_conf_count; +	unsigned int is_apb_phy:1; +}; + +struct hdmi_resources { +	struct clk			*hdmi; +	struct clk			*sclk_hdmi; +	struct clk			*sclk_pixel; +	struct clk			*sclk_hdmiphy; +	struct clk			*mout_hdmi; +	struct regulator_bulk_data	*regul_bulk; +	int				regul_count; +}; + +struct hdmi_tg_regs { +	u8 cmd[1]; +	u8 h_fsz[2]; +	u8 hact_st[2]; +	u8 hact_sz[2]; +	u8 v_fsz[2]; +	u8 vsync[2]; +	u8 vsync2[2]; +	u8 vact_st[2]; +	u8 vact_sz[2]; +	u8 field_chg[2]; +	u8 vact_st2[2]; +	u8 vact_st3[2]; +	u8 vact_st4[2]; +	u8 vsync_top_hdmi[2]; +	u8 vsync_bot_hdmi[2]; +	u8 field_top_hdmi[2]; +	u8 field_bot_hdmi[2]; +	u8 tg_3d[1]; +}; + +struct hdmi_v13_core_regs { +	u8 h_blank[2]; +	u8 v_blank[3]; +	u8 h_v_line[3]; +	u8 vsync_pol[1]; +	u8 int_pro_mode[1]; +	u8 v_blank_f[3]; +	u8 h_sync_gen[3]; +	u8 v_sync_gen1[3]; +	u8 v_sync_gen2[3]; +	u8 v_sync_gen3[3]; +}; + +struct hdmi_v14_core_regs { +	u8 h_blank[2]; +	u8 v2_blank[2]; +	u8 v1_blank[2]; +	u8 v_line[2]; +	u8 h_line[2]; +	u8 hsync_pol[1]; +	u8 vsync_pol[1]; +	u8 int_pro_mode[1]; +	u8 v_blank_f0[2]; +	u8 v_blank_f1[2]; +	u8 h_sync_start[2]; +	u8 h_sync_end[2]; +	u8 v_sync_line_bef_2[2]; +	u8 v_sync_line_bef_1[2]; +	u8 v_sync_line_aft_2[2]; +	u8 v_sync_line_aft_1[2]; +	u8 v_sync_line_aft_pxl_2[2]; +	u8 v_sync_line_aft_pxl_1[2]; +	u8 v_blank_f2[2]; /* for 3D mode */ +	u8 v_blank_f3[2]; /* for 3D mode */ +	u8 v_blank_f4[2]; /* for 3D mode */ +	u8 v_blank_f5[2]; /* for 3D mode */ +	u8 v_sync_line_aft_3[2]; +	u8 v_sync_line_aft_4[2]; +	u8 
v_sync_line_aft_5[2]; +	u8 v_sync_line_aft_6[2]; +	u8 v_sync_line_aft_pxl_3[2]; +	u8 v_sync_line_aft_pxl_4[2]; +	u8 v_sync_line_aft_pxl_5[2]; +	u8 v_sync_line_aft_pxl_6[2]; +	u8 vact_space_1[2]; +	u8 vact_space_2[2]; +	u8 vact_space_3[2]; +	u8 vact_space_4[2]; +	u8 vact_space_5[2]; +	u8 vact_space_6[2]; +}; + +struct hdmi_v13_conf { +	struct hdmi_v13_core_regs core; +	struct hdmi_tg_regs tg; +}; + +struct hdmi_v14_conf { +	struct hdmi_v14_core_regs core; +	struct hdmi_tg_regs tg; +}; + +struct hdmi_conf_regs { +	int pixel_clock; +	int cea_video_id; +	enum hdmi_picture_aspect aspect_ratio; +	union { +		struct hdmi_v13_conf v13_conf; +		struct hdmi_v14_conf v14_conf; +	} conf; +}; + +struct hdmi_context { +	struct device			*dev; +	struct drm_device		*drm_dev; +	struct drm_connector		connector; +	struct drm_encoder		*encoder; +	bool				hpd; +	bool				powered; +	bool				dvi_mode; +	struct mutex			hdmi_mutex; + +	void __iomem			*regs; +	int				irq; +	struct delayed_work		hotplug_work; + +	struct i2c_adapter		*ddc_adpt; +	struct i2c_client		*hdmiphy_port; + +	/* current hdmiphy conf regs */ +	struct drm_display_mode		current_mode; +	struct hdmi_conf_regs		mode_conf; + +	struct hdmi_resources		res; + +	int				hpd_gpio; +	void __iomem			*regs_hdmiphy; +	const struct hdmiphy_config		*phy_confs; +	unsigned int			phy_conf_count; + +	struct regmap			*pmureg; +	enum hdmi_type			type; +}; + +struct hdmiphy_config { +	int pixel_clock; +	u8 conf[32]; +}; + +/* list of phy config settings */ +static const struct hdmiphy_config hdmiphy_v13_configs[] = { +	{ +		.pixel_clock = 27000000, +		.conf = { +			0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, +			0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, +			0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, +			0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, +		}, +	}, +	{ +		.pixel_clock = 27027000, +		.conf = { +			0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, +			0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, +			0x84, 0x00, 0x30, 
0x38, 0x00, 0x08, 0x10, 0xE0, +			0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, +		}, +	}, +	{ +		.pixel_clock = 74176000, +		.conf = { +			0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, +			0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, +			0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, +			0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, +		}, +	}, +	{ +		.pixel_clock = 74250000, +		.conf = { +			0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, +			0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, +			0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, +			0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, +		}, +	}, +	{ +		.pixel_clock = 148500000, +		.conf = { +			0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, +			0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, +			0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, +			0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, +		}, +	}, +}; + +static const struct hdmiphy_config hdmiphy_v14_configs[] = { +	{ +		.pixel_clock = 25200000, +		.conf = { +			0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08, +			0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 27000000, +		.conf = { +			0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20, +			0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80, +			0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 27027000, +		.conf = { +			0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, +			0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, +		}, +	}, +	{ +		.pixel_clock = 36000000, +		.conf = { +			0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08, +			0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, 
+		}, +	}, +	{ +		.pixel_clock = 40000000, +		.conf = { +			0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08, +			0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 65000000, +		.conf = { +			0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08, +			0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 71000000, +		.conf = { +			0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08, +			0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 73250000, +		.conf = { +			0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08, +			0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 74176000, +		.conf = { +			0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08, +			0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 74250000, +		.conf = { +			0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, +			0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, +			0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, +		}, +	}, +	{ +		.pixel_clock = 83500000, +		.conf = { +			0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08, +			0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 106500000, +		.conf = { +			0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, +			0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 
0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 108000000, +		.conf = { +			0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08, +			0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 115500000, +		.conf = { +			0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08, +			0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 119000000, +		.conf = { +			0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08, +			0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 146250000, +		.conf = { +			0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08, +			0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80, +			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 148500000, +		.conf = { +			0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, +			0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, +			0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, +			0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, +		}, +	}, +}; + +static const struct hdmiphy_config hdmiphy_5420_configs[] = { +	{ +		.pixel_clock = 25200000, +		.conf = { +			0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8, +			0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 27000000, +		.conf = { +			0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0, +			0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 
0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 27027000, +		.conf = { +			0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8, +			0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 36000000, +		.conf = { +			0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8, +			0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 40000000, +		.conf = { +			0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8, +			0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 65000000, +		.conf = { +			0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8, +			0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 71000000, +		.conf = { +			0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8, +			0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 73250000, +		.conf = { +			0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8, +			0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 74176000, +		.conf = { +			0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8, +			0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 74250000, +		.conf = { +			0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08, +	
		0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66, +			0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 83500000, +		.conf = { +			0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8, +			0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 88750000, +		.conf = { +			0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8, +			0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 106500000, +		.conf = { +			0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8, +			0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 108000000, +		.conf = { +			0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8, +			0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 115500000, +		.conf = { +			0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8, +			0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 146250000, +		.conf = { +			0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8, +			0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, +			0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, +			0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, +		}, +	}, +	{ +		.pixel_clock = 148500000, +		.conf = { +			0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08, +			0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80, +			0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66, +			0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 
0x01, 0x80, +		}, +	}, +}; + +static struct hdmi_driver_data exynos5420_hdmi_driver_data = { +	.type		= HDMI_TYPE14, +	.phy_confs	= hdmiphy_5420_configs, +	.phy_conf_count	= ARRAY_SIZE(hdmiphy_5420_configs), +	.is_apb_phy	= 1, +}; + +static struct hdmi_driver_data exynos4212_hdmi_driver_data = { +	.type		= HDMI_TYPE14, +	.phy_confs	= hdmiphy_v14_configs, +	.phy_conf_count	= ARRAY_SIZE(hdmiphy_v14_configs), +	.is_apb_phy	= 0, +}; + +static struct hdmi_driver_data exynos5_hdmi_driver_data = { +	.type		= HDMI_TYPE14, +	.phy_confs	= hdmiphy_v13_configs, +	.phy_conf_count	= ARRAY_SIZE(hdmiphy_v13_configs), +	.is_apb_phy	= 0, +}; + +static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) +{ +	return readl(hdata->regs + reg_id); +} + +static inline void hdmi_reg_writeb(struct hdmi_context *hdata, +				 u32 reg_id, u8 value) +{ +	writeb(value, hdata->regs + reg_id); +} + +static inline void hdmi_reg_writemask(struct hdmi_context *hdata, +				 u32 reg_id, u32 value, u32 mask) +{ +	u32 old = readl(hdata->regs + reg_id); +	value = (value & mask) | (old & ~mask); +	writel(value, hdata->regs + reg_id); +} + +static int hdmiphy_reg_writeb(struct hdmi_context *hdata, +			u32 reg_offset, u8 value) +{ +	if (hdata->hdmiphy_port) { +		u8 buffer[2]; +		int ret; + +		buffer[0] = reg_offset; +		buffer[1] = value; + +		ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2); +		if (ret == 2) +			return 0; +		return ret; +	} else { +		writeb(value, hdata->regs_hdmiphy + (reg_offset<<2)); +		return 0; +	} +} + +static int hdmiphy_reg_write_buf(struct hdmi_context *hdata, +			u32 reg_offset, const u8 *buf, u32 len) +{ +	if ((reg_offset + len) > 32) +		return -EINVAL; + +	if (hdata->hdmiphy_port) { +		int ret; + +		ret = i2c_master_send(hdata->hdmiphy_port, buf, len); +		if (ret == len) +			return 0; +		return ret; +	} else { +		int i; +		for (i = 0; i < len; i++) +			writeb(buf[i], hdata->regs_hdmiphy + +				((reg_offset + i)<<2)); +		return 0; +	} +} + +static void 
hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) +{ +#define DUMPREG(reg_id) \ +	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ +	readl(hdata->regs + reg_id)) +	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); +	DUMPREG(HDMI_INTC_FLAG); +	DUMPREG(HDMI_INTC_CON); +	DUMPREG(HDMI_HPD_STATUS); +	DUMPREG(HDMI_V13_PHY_RSTOUT); +	DUMPREG(HDMI_V13_PHY_VPLL); +	DUMPREG(HDMI_V13_PHY_CMU); +	DUMPREG(HDMI_V13_CORE_RSTOUT); + +	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); +	DUMPREG(HDMI_CON_0); +	DUMPREG(HDMI_CON_1); +	DUMPREG(HDMI_CON_2); +	DUMPREG(HDMI_SYS_STATUS); +	DUMPREG(HDMI_V13_PHY_STATUS); +	DUMPREG(HDMI_STATUS_EN); +	DUMPREG(HDMI_HPD); +	DUMPREG(HDMI_MODE_SEL); +	DUMPREG(HDMI_V13_HPD_GEN); +	DUMPREG(HDMI_V13_DC_CONTROL); +	DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN); + +	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); +	DUMPREG(HDMI_H_BLANK_0); +	DUMPREG(HDMI_H_BLANK_1); +	DUMPREG(HDMI_V13_V_BLANK_0); +	DUMPREG(HDMI_V13_V_BLANK_1); +	DUMPREG(HDMI_V13_V_BLANK_2); +	DUMPREG(HDMI_V13_H_V_LINE_0); +	DUMPREG(HDMI_V13_H_V_LINE_1); +	DUMPREG(HDMI_V13_H_V_LINE_2); +	DUMPREG(HDMI_VSYNC_POL); +	DUMPREG(HDMI_INT_PRO_MODE); +	DUMPREG(HDMI_V13_V_BLANK_F_0); +	DUMPREG(HDMI_V13_V_BLANK_F_1); +	DUMPREG(HDMI_V13_V_BLANK_F_2); +	DUMPREG(HDMI_V13_H_SYNC_GEN_0); +	DUMPREG(HDMI_V13_H_SYNC_GEN_1); +	DUMPREG(HDMI_V13_H_SYNC_GEN_2); +	DUMPREG(HDMI_V13_V_SYNC_GEN_1_0); +	DUMPREG(HDMI_V13_V_SYNC_GEN_1_1); +	DUMPREG(HDMI_V13_V_SYNC_GEN_1_2); +	DUMPREG(HDMI_V13_V_SYNC_GEN_2_0); +	DUMPREG(HDMI_V13_V_SYNC_GEN_2_1); +	DUMPREG(HDMI_V13_V_SYNC_GEN_2_2); +	DUMPREG(HDMI_V13_V_SYNC_GEN_3_0); +	DUMPREG(HDMI_V13_V_SYNC_GEN_3_1); +	DUMPREG(HDMI_V13_V_SYNC_GEN_3_2); + +	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); +	DUMPREG(HDMI_TG_CMD); +	DUMPREG(HDMI_TG_H_FSZ_L); +	DUMPREG(HDMI_TG_H_FSZ_H); +	DUMPREG(HDMI_TG_HACT_ST_L); +	DUMPREG(HDMI_TG_HACT_ST_H); +	DUMPREG(HDMI_TG_HACT_SZ_L); +	DUMPREG(HDMI_TG_HACT_SZ_H); +	DUMPREG(HDMI_TG_V_FSZ_L); +	
DUMPREG(HDMI_TG_V_FSZ_H); +	DUMPREG(HDMI_TG_VSYNC_L); +	DUMPREG(HDMI_TG_VSYNC_H); +	DUMPREG(HDMI_TG_VSYNC2_L); +	DUMPREG(HDMI_TG_VSYNC2_H); +	DUMPREG(HDMI_TG_VACT_ST_L); +	DUMPREG(HDMI_TG_VACT_ST_H); +	DUMPREG(HDMI_TG_VACT_SZ_L); +	DUMPREG(HDMI_TG_VACT_SZ_H); +	DUMPREG(HDMI_TG_FIELD_CHG_L); +	DUMPREG(HDMI_TG_FIELD_CHG_H); +	DUMPREG(HDMI_TG_VACT_ST2_L); +	DUMPREG(HDMI_TG_VACT_ST2_H); +	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); +	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); +	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); +	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); +	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); +	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); +	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); +	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); +#undef DUMPREG +} + +static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) +{ +	int i; + +#define DUMPREG(reg_id) \ +	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ +	readl(hdata->regs + reg_id)) + +	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); +	DUMPREG(HDMI_INTC_CON); +	DUMPREG(HDMI_INTC_FLAG); +	DUMPREG(HDMI_HPD_STATUS); +	DUMPREG(HDMI_INTC_CON_1); +	DUMPREG(HDMI_INTC_FLAG_1); +	DUMPREG(HDMI_PHY_STATUS_0); +	DUMPREG(HDMI_PHY_STATUS_PLL); +	DUMPREG(HDMI_PHY_CON_0); +	DUMPREG(HDMI_PHY_RSTOUT); +	DUMPREG(HDMI_PHY_VPLL); +	DUMPREG(HDMI_PHY_CMU); +	DUMPREG(HDMI_CORE_RSTOUT); + +	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); +	DUMPREG(HDMI_CON_0); +	DUMPREG(HDMI_CON_1); +	DUMPREG(HDMI_CON_2); +	DUMPREG(HDMI_SYS_STATUS); +	DUMPREG(HDMI_PHY_STATUS_0); +	DUMPREG(HDMI_STATUS_EN); +	DUMPREG(HDMI_HPD); +	DUMPREG(HDMI_MODE_SEL); +	DUMPREG(HDMI_ENC_EN); +	DUMPREG(HDMI_DC_CONTROL); +	DUMPREG(HDMI_VIDEO_PATTERN_GEN); + +	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); +	DUMPREG(HDMI_H_BLANK_0); +	DUMPREG(HDMI_H_BLANK_1); +	DUMPREG(HDMI_V2_BLANK_0); +	DUMPREG(HDMI_V2_BLANK_1); +	DUMPREG(HDMI_V1_BLANK_0); +	DUMPREG(HDMI_V1_BLANK_1); +	DUMPREG(HDMI_V_LINE_0); +	DUMPREG(HDMI_V_LINE_1); +	DUMPREG(HDMI_H_LINE_0); +	DUMPREG(HDMI_H_LINE_1); +	
DUMPREG(HDMI_HSYNC_POL); + +	DUMPREG(HDMI_VSYNC_POL); +	DUMPREG(HDMI_INT_PRO_MODE); +	DUMPREG(HDMI_V_BLANK_F0_0); +	DUMPREG(HDMI_V_BLANK_F0_1); +	DUMPREG(HDMI_V_BLANK_F1_0); +	DUMPREG(HDMI_V_BLANK_F1_1); + +	DUMPREG(HDMI_H_SYNC_START_0); +	DUMPREG(HDMI_H_SYNC_START_1); +	DUMPREG(HDMI_H_SYNC_END_0); +	DUMPREG(HDMI_H_SYNC_END_1); + +	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0); +	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1); +	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0); +	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1); + +	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1); + +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1); + +	DUMPREG(HDMI_V_BLANK_F2_0); +	DUMPREG(HDMI_V_BLANK_F2_1); +	DUMPREG(HDMI_V_BLANK_F3_0); +	DUMPREG(HDMI_V_BLANK_F3_1); +	DUMPREG(HDMI_V_BLANK_F4_0); +	DUMPREG(HDMI_V_BLANK_F4_1); +	DUMPREG(HDMI_V_BLANK_F5_0); +	DUMPREG(HDMI_V_BLANK_F5_1); + +	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1); + +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0); +	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1); + +	DUMPREG(HDMI_VACT_SPACE_1_0); +	DUMPREG(HDMI_VACT_SPACE_1_1); +	DUMPREG(HDMI_VACT_SPACE_2_0); +	DUMPREG(HDMI_VACT_SPACE_2_1); +	DUMPREG(HDMI_VACT_SPACE_3_0); +	DUMPREG(HDMI_VACT_SPACE_3_1); +	DUMPREG(HDMI_VACT_SPACE_4_0); +	DUMPREG(HDMI_VACT_SPACE_4_1); +	DUMPREG(HDMI_VACT_SPACE_5_0); +	DUMPREG(HDMI_VACT_SPACE_5_1); +	
DUMPREG(HDMI_VACT_SPACE_6_0); +	DUMPREG(HDMI_VACT_SPACE_6_1); + +	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); +	DUMPREG(HDMI_TG_CMD); +	DUMPREG(HDMI_TG_H_FSZ_L); +	DUMPREG(HDMI_TG_H_FSZ_H); +	DUMPREG(HDMI_TG_HACT_ST_L); +	DUMPREG(HDMI_TG_HACT_ST_H); +	DUMPREG(HDMI_TG_HACT_SZ_L); +	DUMPREG(HDMI_TG_HACT_SZ_H); +	DUMPREG(HDMI_TG_V_FSZ_L); +	DUMPREG(HDMI_TG_V_FSZ_H); +	DUMPREG(HDMI_TG_VSYNC_L); +	DUMPREG(HDMI_TG_VSYNC_H); +	DUMPREG(HDMI_TG_VSYNC2_L); +	DUMPREG(HDMI_TG_VSYNC2_H); +	DUMPREG(HDMI_TG_VACT_ST_L); +	DUMPREG(HDMI_TG_VACT_ST_H); +	DUMPREG(HDMI_TG_VACT_SZ_L); +	DUMPREG(HDMI_TG_VACT_SZ_H); +	DUMPREG(HDMI_TG_FIELD_CHG_L); +	DUMPREG(HDMI_TG_FIELD_CHG_H); +	DUMPREG(HDMI_TG_VACT_ST2_L); +	DUMPREG(HDMI_TG_VACT_ST2_H); +	DUMPREG(HDMI_TG_VACT_ST3_L); +	DUMPREG(HDMI_TG_VACT_ST3_H); +	DUMPREG(HDMI_TG_VACT_ST4_L); +	DUMPREG(HDMI_TG_VACT_ST4_H); +	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); +	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); +	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); +	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); +	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); +	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); +	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); +	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); +	DUMPREG(HDMI_TG_3D); + +	DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix); +	DUMPREG(HDMI_AVI_CON); +	DUMPREG(HDMI_AVI_HEADER0); +	DUMPREG(HDMI_AVI_HEADER1); +	DUMPREG(HDMI_AVI_HEADER2); +	DUMPREG(HDMI_AVI_CHECK_SUM); +	DUMPREG(HDMI_VSI_CON); +	DUMPREG(HDMI_VSI_HEADER0); +	DUMPREG(HDMI_VSI_HEADER1); +	DUMPREG(HDMI_VSI_HEADER2); +	for (i = 0; i < 7; ++i) +		DUMPREG(HDMI_VSI_DATA(i)); + +#undef DUMPREG +} + +static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) +{ +	if (hdata->type == HDMI_TYPE13) +		hdmi_v13_regs_dump(hdata, prefix); +	else +		hdmi_v14_regs_dump(hdata, prefix); +} + +static u8 hdmi_chksum(struct hdmi_context *hdata, +			u32 start, u8 len, u32 hdr_sum) +{ +	int i; + +	/* hdr_sum : header0 + header1 + header2 +	* start : start address of packet byte1 +	* len : packet bytes - 1 */ +	for (i = 
0; i < len; ++i) +		hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4); + +	/* return 2's complement of 8 bit hdr_sum */ +	return (u8)(~(hdr_sum & 0xff) + 1); +} + +static void hdmi_reg_infoframe(struct hdmi_context *hdata, +			union hdmi_infoframe *infoframe) +{ +	u32 hdr_sum; +	u8 chksum; +	u32 mod; +	u32 vic; + +	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); +	if (hdata->dvi_mode) { +		hdmi_reg_writeb(hdata, HDMI_VSI_CON, +				HDMI_VSI_CON_DO_NOT_TRANSMIT); +		hdmi_reg_writeb(hdata, HDMI_AVI_CON, +				HDMI_AVI_CON_DO_NOT_TRANSMIT); +		hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN); +		return; +	} + +	switch (infoframe->any.type) { +	case HDMI_INFOFRAME_TYPE_AVI: +		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); +		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type); +		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, +				infoframe->any.version); +		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length); +		hdr_sum = infoframe->any.type + infoframe->any.version + +			  infoframe->any.length; + +		/* Output format zero hardcoded ,RGB YBCR selection */ +		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | +			AVI_ACTIVE_FORMAT_VALID | +			AVI_UNDERSCANNED_DISPLAY_VALID); + +		/* +		 * Set the aspect ratio as per the mode, mentioned in +		 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard +		 */ +		switch (hdata->mode_conf.aspect_ratio) { +		case HDMI_PICTURE_ASPECT_4_3: +			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), +					hdata->mode_conf.aspect_ratio | +					AVI_4_3_CENTER_RATIO); +			break; +		case HDMI_PICTURE_ASPECT_16_9: +			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), +					hdata->mode_conf.aspect_ratio | +					AVI_16_9_CENTER_RATIO); +			break; +		case HDMI_PICTURE_ASPECT_NONE: +		default: +			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), +					hdata->mode_conf.aspect_ratio | +					AVI_SAME_AS_PIC_ASPECT_RATIO); +			break; +		} + +		vic = hdata->mode_conf.cea_video_id; +		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); + +	
	chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), +					infoframe->any.length, hdr_sum); +		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); +		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); +		break; +	case HDMI_INFOFRAME_TYPE_AUDIO: +		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); +		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type); +		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, +				infoframe->any.version); +		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length); +		hdr_sum = infoframe->any.type + infoframe->any.version + +			  infoframe->any.length; +		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), +					infoframe->any.length, hdr_sum); +		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum); +		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); +		break; +	default: +		break; +	} +} + +static enum drm_connector_status hdmi_detect(struct drm_connector *connector, +				bool force) +{ +	struct hdmi_context *hdata = ctx_from_connector(connector); + +	hdata->hpd = gpio_get_value(hdata->hpd_gpio); + +	return hdata->hpd ? connector_status_connected : +			connector_status_disconnected; +} + +static void hdmi_connector_destroy(struct drm_connector *connector) +{ +} + +static struct drm_connector_funcs hdmi_connector_funcs = { +	.dpms = drm_helper_connector_dpms, +	.fill_modes = drm_helper_probe_single_connector_modes, +	.detect = hdmi_detect, +	.destroy = hdmi_connector_destroy, +}; + +static int hdmi_get_modes(struct drm_connector *connector) +{ +	struct hdmi_context *hdata = ctx_from_connector(connector); +	struct edid *edid; + +	if (!hdata->ddc_adpt) +		return -ENODEV; + +	edid = drm_get_edid(connector, hdata->ddc_adpt); +	if (!edid) +		return -ENODEV; + +	hdata->dvi_mode = !drm_detect_hdmi_monitor(edid); +	DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", +		(hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), +		edid->width_cm, edid->height_cm); + +	drm_mode_connector_update_edid_property(connector, edid); + +	return drm_add_edid_modes(connector, edid); +} + +static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) +{ +	int i; + +	for (i = 0; i < hdata->phy_conf_count; i++) +		if (hdata->phy_confs[i].pixel_clock == pixel_clock) +			return i; + +	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); +	return -EINVAL; +} + +static int hdmi_mode_valid(struct drm_connector *connector, +			struct drm_display_mode *mode) +{ +	struct hdmi_context *hdata = ctx_from_connector(connector); +	int ret; + +	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", +		mode->hdisplay, mode->vdisplay, mode->vrefresh, +		(mode->flags & DRM_MODE_FLAG_INTERLACE) ? true : +		false, mode->clock * 1000); + +	ret = mixer_check_mode(mode); +	if (ret) +		return MODE_BAD; + +	ret = hdmi_find_phy_conf(hdata, mode->clock * 1000); +	if (ret < 0) +		return MODE_BAD; + +	return MODE_OK; +} + +static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) +{ +	struct hdmi_context *hdata = ctx_from_connector(connector); + +	return hdata->encoder; +} + +static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { +	.get_modes = hdmi_get_modes, +	.mode_valid = hdmi_mode_valid, +	.best_encoder = hdmi_best_encoder, +}; + +static int hdmi_create_connector(struct exynos_drm_display *display, +			struct drm_encoder *encoder) +{ +	struct hdmi_context *hdata = display->ctx; +	struct drm_connector *connector = &hdata->connector; +	int ret; + +	hdata->encoder = encoder; +	connector->interlace_allowed = true; +	connector->polled = DRM_CONNECTOR_POLL_HPD; + +	ret = drm_connector_init(hdata->drm_dev, connector, +			&hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); +	if (ret) { +		DRM_ERROR("Failed to initialize connector with drm\n"); +		return ret; +	} + +	drm_connector_helper_add(connector, 
&hdmi_connector_helper_funcs); +	drm_sysfs_connector_add(connector); +	drm_mode_connector_attach_encoder(connector, encoder); + +	return 0; +} + +static void hdmi_mode_fixup(struct exynos_drm_display *display, +				struct drm_connector *connector, +				const struct drm_display_mode *mode, +				struct drm_display_mode *adjusted_mode) +{ +	struct drm_display_mode *m; +	int mode_ok; + +	DRM_DEBUG_KMS("%s\n", __FILE__); + +	drm_mode_set_crtcinfo(adjusted_mode, 0); + +	mode_ok = hdmi_mode_valid(connector, adjusted_mode); + +	/* just return if user desired mode exists. */ +	if (mode_ok == MODE_OK) +		return; + +	/* +	 * otherwise, find the most suitable mode among modes and change it +	 * to adjusted_mode. +	 */ +	list_for_each_entry(m, &connector->modes, head) { +		mode_ok = hdmi_mode_valid(connector, m); + +		if (mode_ok == MODE_OK) { +			DRM_INFO("desired mode doesn't exist so\n"); +			DRM_INFO("use the most suitable mode among modes.\n"); + +			DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n", +				m->hdisplay, m->vdisplay, m->vrefresh); + +			drm_mode_copy(adjusted_mode, m); +			break; +		} +	} +} + +static void hdmi_set_acr(u32 freq, u8 *acr) +{ +	u32 n, cts; + +	switch (freq) { +	case 32000: +		n = 4096; +		cts = 27000; +		break; +	case 44100: +		n = 6272; +		cts = 30000; +		break; +	case 88200: +		n = 12544; +		cts = 30000; +		break; +	case 176400: +		n = 25088; +		cts = 30000; +		break; +	case 48000: +		n = 6144; +		cts = 27000; +		break; +	case 96000: +		n = 12288; +		cts = 27000; +		break; +	case 192000: +		n = 24576; +		cts = 27000; +		break; +	default: +		n = 0; +		cts = 0; +		break; +	} + +	acr[1] = cts >> 16; +	acr[2] = cts >> 8 & 0xff; +	acr[3] = cts & 0xff; + +	acr[4] = n >> 16; +	acr[5] = n >> 8 & 0xff; +	acr[6] = n & 0xff; +} + +static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) +{ +	hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]); +	hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]); +	hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]); +	
hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]); +	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]); +	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]); +	hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]); +	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); +	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); + +	if (hdata->type == HDMI_TYPE13) +		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); +	else +		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4); +} + +static void hdmi_audio_init(struct hdmi_context *hdata) +{ +	u32 sample_rate, bits_per_sample, frame_size_code; +	u32 data_num, bit_ch, sample_frq; +	u32 val; +	u8 acr[7]; + +	sample_rate = 44100; +	bits_per_sample = 16; +	frame_size_code = 0; + +	switch (bits_per_sample) { +	case 20: +		data_num = 2; +		bit_ch  = 1; +		break; +	case 24: +		data_num = 3; +		bit_ch  = 1; +		break; +	default: +		data_num = 1; +		bit_ch  = 0; +		break; +	} + +	hdmi_set_acr(sample_rate, acr); +	hdmi_reg_acr(hdata, acr); + +	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE +				| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE +				| HDMI_I2S_MUX_ENABLE); + +	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN +			| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN); + +	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN); + +	sample_frq = (sample_rate == 44100) ? 0 : +			(sample_rate == 48000) ? 2 : +			(sample_rate == 32000) ? 3 : +			(sample_rate == 96000) ? 0xa : 0x0; + +	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS); +	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN); + +	val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01; +	hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val); + +	/* Configuration I2S input ports. 
Configure I2S_PIN_SEL_0~4 */ +	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) +			| HDMI_I2S_SEL_LRCK(6)); +	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) +			| HDMI_I2S_SEL_SDATA2(4)); +	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) +			| HDMI_I2S_SEL_SDATA2(2)); +	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); + +	/* I2S_CON_1 & 2 */ +	hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE +			| HDMI_I2S_L_CH_LOW_POL); +	hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE +			| HDMI_I2S_SET_BIT_CH(bit_ch) +			| HDMI_I2S_SET_SDATA_BIT(data_num) +			| HDMI_I2S_BASIC_FORMAT); + +	/* Configure register related to CUV information */ +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0 +			| HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH +			| HDMI_I2S_COPYRIGHT +			| HDMI_I2S_LINEAR_PCM +			| HDMI_I2S_CONSUMER_FORMAT); +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER); +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0)); +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2 +			| HDMI_I2S_SET_SMP_FREQ(sample_frq)); +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4, +			HDMI_I2S_ORG_SMP_FREQ_44_1 +			| HDMI_I2S_WORD_LEN_MAX24_24BITS +			| HDMI_I2S_WORD_LEN_MAX_24BITS); + +	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD); +} + +static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) +{ +	if (hdata->dvi_mode) +		return; + +	hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); +	hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ? +			HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); +} + +static void hdmi_start(struct hdmi_context *hdata, bool start) +{ +	u32 val = start ? 
HDMI_TG_EN : 0; + +	if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE) +		val |= HDMI_FIELD_EN; + +	hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN); +	hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN); +} + +static void hdmi_conf_init(struct hdmi_context *hdata) +{ +	union hdmi_infoframe infoframe; + +	/* disable HPD interrupts from HDMI IP block, use GPIO instead */ +	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | +		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); + +	/* choose HDMI mode */ +	hdmi_reg_writemask(hdata, HDMI_MODE_SEL, +		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); +	/* Apply Video preable and Guard band in HDMI mode only */ +	hdmi_reg_writeb(hdata, HDMI_CON_2, 0); +	/* disable bluescreen */ +	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); + +	if (hdata->dvi_mode) { +		/* choose DVI mode */ +		hdmi_reg_writemask(hdata, HDMI_MODE_SEL, +				HDMI_MODE_DVI_EN, HDMI_MODE_MASK); +		hdmi_reg_writeb(hdata, HDMI_CON_2, +				HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); +	} + +	if (hdata->type == HDMI_TYPE13) { +		/* choose bluescreen (fecal) color */ +		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); +		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); +		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56); + +		/* enable AVI packet every vsync, fixes purple line problem */ +		hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02); +		/* force RGB, look to CEA-861-D, table 7 for more detail */ +		hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5); +		hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5); + +		hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02); +		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); +		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); +	} else { +		infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI; +		infoframe.any.version = HDMI_AVI_VERSION; +		infoframe.any.length = HDMI_AVI_LENGTH; +		hdmi_reg_infoframe(hdata, &infoframe); + +		infoframe.any.type = 
HDMI_INFOFRAME_TYPE_AUDIO; +		infoframe.any.version = HDMI_AUI_VERSION; +		infoframe.any.length = HDMI_AUI_LENGTH; +		hdmi_reg_infoframe(hdata, &infoframe); + +		/* enable AVI packet every vsync, fixes purple line problem */ +		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); +	} +} + +static void hdmi_v13_mode_apply(struct hdmi_context *hdata) +{ +	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; +	const struct hdmi_v13_core_regs *core = +		&hdata->mode_conf.conf.v13_conf.core; +	int tries; + +	/* setting core registers */ +	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); +	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); +	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); +	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); +	hdmi_reg_writeb(hdata, 
HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); +	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]); +	/* Timing generator registers */ +	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); +	hdmi_reg_writeb(hdata, 
HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); + +	/* waiting for HDMIPHY's PLL to get to steady state */ +	for (tries = 100; tries; --tries) { +		u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); +		if (val & HDMI_PHY_STATUS_READY) +			break; +		usleep_range(1000, 2000); +	} +	/* steady state not achieved */ +	if (tries == 0) { +		DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); +		hdmi_regs_dump(hdata, "timing apply"); +	} + +	clk_disable_unprepare(hdata->res.sclk_hdmi); +	clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy); +	clk_prepare_enable(hdata->res.sclk_hdmi); + +	/* enable HDMI and timing generator */ +	hdmi_start(hdata, true); +} + +static void hdmi_v14_mode_apply(struct hdmi_context *hdata) +{ +	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; +	const struct hdmi_v14_core_regs *core = +		&hdata->mode_conf.conf.v14_conf.core; +	int tries; + +	/* setting core registers */ +	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); +	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); +	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); +	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); +	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); +	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); +	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); +	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); +	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); +	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); +	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); +	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); +	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, 
core->v_blank_f1[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); +	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); +	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); +	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); +	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, +			core->v_sync_line_bef_2[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, +			core->v_sync_line_bef_2[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, +			core->v_sync_line_bef_1[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, +			core->v_sync_line_bef_1[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, +			core->v_sync_line_aft_2[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, +			core->v_sync_line_aft_2[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, +			core->v_sync_line_aft_1[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, +			core->v_sync_line_aft_1[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, +			core->v_sync_line_aft_pxl_2[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, +			core->v_sync_line_aft_pxl_2[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, +			core->v_sync_line_aft_pxl_1[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, +			core->v_sync_line_aft_pxl_1[1]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); +	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, +			
core->v_sync_line_aft_3[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, +			core->v_sync_line_aft_3[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, +			core->v_sync_line_aft_4[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, +			core->v_sync_line_aft_4[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, +			core->v_sync_line_aft_5[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, +			core->v_sync_line_aft_5[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, +			core->v_sync_line_aft_6[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1, +			core->v_sync_line_aft_6[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, +			core->v_sync_line_aft_pxl_3[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, +			core->v_sync_line_aft_pxl_3[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, +			core->v_sync_line_aft_pxl_4[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, +			core->v_sync_line_aft_pxl_4[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, +			core->v_sync_line_aft_pxl_5[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, +			core->v_sync_line_aft_pxl_5[1]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, +			core->v_sync_line_aft_pxl_6[0]); +	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, +			core->v_sync_line_aft_pxl_6[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); +	
hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); +	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); + +	/* Timing generator registers */ +	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, 
tg->vsync_bot_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); +	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); +	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]); + +	/* waiting for HDMIPHY's PLL to get to steady state */ +	for (tries = 100; tries; --tries) { +		u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); +		if (val & HDMI_PHY_STATUS_READY) +			break; +		usleep_range(1000, 2000); +	} +	/* steady state not achieved */ +	if (tries == 0) { +		DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); +		hdmi_regs_dump(hdata, "timing apply"); +	} + +	clk_disable_unprepare(hdata->res.sclk_hdmi); +	clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy); +	clk_prepare_enable(hdata->res.sclk_hdmi); + +	/* enable HDMI and timing generator */ +	hdmi_start(hdata, true); +} + +static void hdmi_mode_apply(struct hdmi_context *hdata) +{ +	if (hdata->type == HDMI_TYPE13) +		hdmi_v13_mode_apply(hdata); +	else +		hdmi_v14_mode_apply(hdata); +} + +static void hdmiphy_conf_reset(struct hdmi_context *hdata) +{ +	u8 buffer[2]; +	u32 reg; + +	clk_disable_unprepare(hdata->res.sclk_hdmi); +	clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_pixel); +	clk_prepare_enable(hdata->res.sclk_hdmi); + +	/* operation mode */ +	buffer[0] = 0x1f; +	buffer[1] = 0x00; + +	if (hdata->hdmiphy_port) +		i2c_master_send(hdata->hdmiphy_port, buffer, 2); + +	if (hdata->type == HDMI_TYPE13) +		reg = HDMI_V13_PHY_RSTOUT; +	else +		reg = HDMI_PHY_RSTOUT; + +	/* reset hdmiphy */ +	hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); +	usleep_range(10000, 12000); +	hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT); +	usleep_range(10000, 12000); +} + +static void hdmiphy_poweron(struct hdmi_context *hdata) +{ +	if (hdata->type != HDMI_TYPE14) +		return; + +	DRM_DEBUG_KMS("\n"); + 
+	/* For PHY Mode Setting */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, +				HDMI_PHY_ENABLE_MODE_SET); +	/* Phy Power On */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_POWER, +				HDMI_PHY_POWER_ON); +	/* For PHY Mode Setting */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, +				HDMI_PHY_DISABLE_MODE_SET); +	/* PHY SW Reset */ +	hdmiphy_conf_reset(hdata); +} + +static void hdmiphy_poweroff(struct hdmi_context *hdata) +{ +	if (hdata->type != HDMI_TYPE14) +		return; + +	DRM_DEBUG_KMS("\n"); + +	/* PHY SW Reset */ +	hdmiphy_conf_reset(hdata); +	/* For PHY Mode Setting */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, +				HDMI_PHY_ENABLE_MODE_SET); + +	/* PHY Power Off */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_POWER, +				HDMI_PHY_POWER_OFF); + +	/* For PHY Mode Setting */ +	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, +				HDMI_PHY_DISABLE_MODE_SET); +} + +static void hdmiphy_conf_apply(struct hdmi_context *hdata) +{ +	int ret; +	int i; + +	/* pixel clock */ +	i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock); +	if (i < 0) { +		DRM_ERROR("failed to find hdmiphy conf\n"); +		return; +	} + +	ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32); +	if (ret) { +		DRM_ERROR("failed to configure hdmiphy\n"); +		return; +	} + +	usleep_range(10000, 12000); + +	ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, +				HDMI_PHY_DISABLE_MODE_SET); +	if (ret) { +		DRM_ERROR("failed to enable hdmiphy\n"); +		return; +	} + +} + +static void hdmi_conf_apply(struct hdmi_context *hdata) +{ +	hdmiphy_conf_reset(hdata); +	hdmiphy_conf_apply(hdata); + +	mutex_lock(&hdata->hdmi_mutex); +	hdmi_start(hdata, false); +	hdmi_conf_init(hdata); +	mutex_unlock(&hdata->hdmi_mutex); + +	hdmi_audio_init(hdata); + +	/* setting core registers */ +	hdmi_mode_apply(hdata); +	hdmi_audio_control(hdata, true); + +	hdmi_regs_dump(hdata, "start"); +} + +static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) +{ +	int i; +	BUG_ON(num_bytes > 4); +	for (i = 0; i < 
num_bytes; i++) +		reg_pair[i] = (value >> (8 * i)) & 0xff; +} + +static void hdmi_v13_mode_set(struct hdmi_context *hdata, +			struct drm_display_mode *m) +{ +	struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; +	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; +	unsigned int val; + +	hdata->mode_conf.cea_video_id = +		drm_match_cea_mode((struct drm_display_mode *)m); +	hdata->mode_conf.pixel_clock = m->clock * 1000; +	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; + +	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); +	hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal); + +	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; +	hdmi_set_reg(core->vsync_pol, 1, val); + +	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0; +	hdmi_set_reg(core->int_pro_mode, 1, val); + +	val = (m->hsync_start - m->hdisplay - 2); +	val |= ((m->hsync_end - m->hdisplay - 2) << 10); +	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20; +	hdmi_set_reg(core->h_sync_gen, 3, val); + +	/* +	 * Quirk requirement for exynos HDMI IP design, +	 * 2 pixels less than the actual calculation for hsync_start +	 * and end. 
+	 */ + +	/* Following values & calculations differ for different type of modes */ +	if (m->flags & DRM_MODE_FLAG_INTERLACE) { +		/* Interlaced Mode */ +		val = ((m->vsync_end - m->vdisplay) / 2); +		val |= ((m->vsync_start - m->vdisplay) / 2) << 12; +		hdmi_set_reg(core->v_sync_gen1, 3, val); + +		val = m->vtotal / 2; +		val |= ((m->vtotal - m->vdisplay) / 2) << 11; +		hdmi_set_reg(core->v_blank, 3, val); + +		val = (m->vtotal + +			((m->vsync_end - m->vsync_start) * 4) + 5) / 2; +		val |= m->vtotal << 11; +		hdmi_set_reg(core->v_blank_f, 3, val); + +		val = ((m->vtotal / 2) + 7); +		val |= ((m->vtotal / 2) + 2) << 12; +		hdmi_set_reg(core->v_sync_gen2, 3, val); + +		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay)); +		val |= ((m->htotal / 2) + +			(m->hsync_start - m->hdisplay)) << 12; +		hdmi_set_reg(core->v_sync_gen3, 3, val); + +		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); +		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); + +		hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ +	} else { +		/* Progressive Mode */ + +		val = m->vtotal; +		val |= (m->vtotal - m->vdisplay) << 11; +		hdmi_set_reg(core->v_blank, 3, val); + +		hdmi_set_reg(core->v_blank_f, 3, 0); + +		val = (m->vsync_end - m->vdisplay); +		val |= ((m->vsync_start - m->vdisplay) << 12); +		hdmi_set_reg(core->v_sync_gen1, 3, val); + +		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */ +		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */ +		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); +		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); +		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ +	} + +	/* Timing generator registers */ +	hdmi_set_reg(tg->cmd, 1, 0x0); +	hdmi_set_reg(tg->h_fsz, 2, m->htotal); +	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); +	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); +	hdmi_set_reg(tg->v_fsz, 2, m->vtotal); +	hdmi_set_reg(tg->vsync, 2, 0x1); +	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ +	
hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ +	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ +	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ +	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ +	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ +	hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */ +} + +static void hdmi_v14_mode_set(struct hdmi_context *hdata, +			struct drm_display_mode *m) +{ +	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; +	struct hdmi_v14_core_regs *core = +		&hdata->mode_conf.conf.v14_conf.core; + +	hdata->mode_conf.cea_video_id = +		drm_match_cea_mode((struct drm_display_mode *)m); +	hdata->mode_conf.pixel_clock = m->clock * 1000; +	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; + +	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); +	hdmi_set_reg(core->v_line, 2, m->vtotal); +	hdmi_set_reg(core->h_line, 2, m->htotal); +	hdmi_set_reg(core->hsync_pol, 1, +			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0); +	hdmi_set_reg(core->vsync_pol, 1, +			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0); +	hdmi_set_reg(core->int_pro_mode, 1, +			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0); + +	/* +	 * Quirk requirement for exynos 5 HDMI IP design, +	 * 2 pixels less than the actual calculation for hsync_start +	 * and end. 
+	 */ + +	/* Following values & calculations differ for different type of modes */ +	if (m->flags & DRM_MODE_FLAG_INTERLACE) { +		/* Interlaced Mode */ +		hdmi_set_reg(core->v_sync_line_bef_2, 2, +			(m->vsync_end - m->vdisplay) / 2); +		hdmi_set_reg(core->v_sync_line_bef_1, 2, +			(m->vsync_start - m->vdisplay) / 2); +		hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2); +		hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2); +		hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2); +		hdmi_set_reg(core->v_blank_f1, 2, m->vtotal); +		hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7); +		hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2); +		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, +			(m->htotal / 2) + (m->hsync_start - m->hdisplay)); +		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, +			(m->htotal / 2) + (m->hsync_start - m->hdisplay)); +		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); +		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); +		hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2); +		hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1); +		hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1); +		hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1); +		hdmi_set_reg(tg->vact_st3, 2, 0x0); +		hdmi_set_reg(tg->vact_st4, 2, 0x0); +	} else { +		/* Progressive Mode */ +		hdmi_set_reg(core->v_sync_line_bef_2, 2, +			m->vsync_end - m->vdisplay); +		hdmi_set_reg(core->v_sync_line_bef_1, 2, +			m->vsync_start - m->vdisplay); +		hdmi_set_reg(core->v2_blank, 2, m->vtotal); +		hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay); +		hdmi_set_reg(core->v_blank_f0, 2, 0xffff); +		hdmi_set_reg(core->v_blank_f1, 2, 0xffff); +		hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff); +		hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff); +		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff); +		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff); +		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); +		
hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); +		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ +		hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */ +		hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */ +		hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ +		hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ +		hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ +	} + +	/* Following values & calculations are same irrespective of mode type */ +	hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2); +	hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2); +	hdmi_set_reg(core->vact_space_1, 2, 0xffff); +	hdmi_set_reg(core->vact_space_2, 2, 0xffff); +	hdmi_set_reg(core->vact_space_3, 2, 0xffff); +	hdmi_set_reg(core->vact_space_4, 2, 0xffff); +	hdmi_set_reg(core->vact_space_5, 2, 0xffff); +	hdmi_set_reg(core->vact_space_6, 2, 0xffff); +	hdmi_set_reg(core->v_blank_f2, 2, 0xffff); +	hdmi_set_reg(core->v_blank_f3, 2, 0xffff); +	hdmi_set_reg(core->v_blank_f4, 2, 0xffff); +	hdmi_set_reg(core->v_blank_f5, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff); +	hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff); + +	/* Timing generator registers */ +	hdmi_set_reg(tg->cmd, 1, 0x0); +	hdmi_set_reg(tg->h_fsz, 2, m->htotal); +	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); +	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); +	hdmi_set_reg(tg->v_fsz, 2, m->vtotal); +	hdmi_set_reg(tg->vsync, 2, 0x1); +	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ +	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ +	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ +	
hdmi_set_reg(tg->tg_3d, 1, 0x0); +} + +static void hdmi_mode_set(struct exynos_drm_display *display, +			struct drm_display_mode *mode) +{ +	struct hdmi_context *hdata = display->ctx; +	struct drm_display_mode *m = mode; + +	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", +		m->hdisplay, m->vdisplay, +		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? +		"INTERLACED" : "PROGERESSIVE"); + +	/* preserve mode information for later use. */ +	drm_mode_copy(&hdata->current_mode, mode); + +	if (hdata->type == HDMI_TYPE13) +		hdmi_v13_mode_set(hdata, mode); +	else +		hdmi_v14_mode_set(hdata, mode); +} + +static void hdmi_commit(struct exynos_drm_display *display) +{ +	struct hdmi_context *hdata = display->ctx; + +	mutex_lock(&hdata->hdmi_mutex); +	if (!hdata->powered) { +		mutex_unlock(&hdata->hdmi_mutex); +		return; +	} +	mutex_unlock(&hdata->hdmi_mutex); + +	hdmi_conf_apply(hdata); +} + +static void hdmi_poweron(struct exynos_drm_display *display) +{ +	struct hdmi_context *hdata = display->ctx; +	struct hdmi_resources *res = &hdata->res; + +	mutex_lock(&hdata->hdmi_mutex); +	if (hdata->powered) { +		mutex_unlock(&hdata->hdmi_mutex); +		return; +	} + +	hdata->powered = true; + +	mutex_unlock(&hdata->hdmi_mutex); + +	pm_runtime_get_sync(hdata->dev); + +	if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) +		DRM_DEBUG_KMS("failed to enable regulator bulk\n"); + +	/* set pmu hdmiphy control bit to enable hdmiphy */ +	regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, +			PMU_HDMI_PHY_ENABLE_BIT, 1); + +	clk_prepare_enable(res->hdmi); +	clk_prepare_enable(res->sclk_hdmi); + +	hdmiphy_poweron(hdata); +	hdmi_commit(display); +} + +static void hdmi_poweroff(struct exynos_drm_display *display) +{ +	struct hdmi_context *hdata = display->ctx; +	struct hdmi_resources *res = &hdata->res; + +	mutex_lock(&hdata->hdmi_mutex); +	if (!hdata->powered) +		goto out; +	mutex_unlock(&hdata->hdmi_mutex); + +	/* HDMI System Disable */ +	hdmi_reg_writemask(hdata, 
HDMI_CON_0, 0, HDMI_EN); + +	hdmiphy_poweroff(hdata); + +	cancel_delayed_work(&hdata->hotplug_work); + +	clk_disable_unprepare(res->sclk_hdmi); +	clk_disable_unprepare(res->hdmi); + +	/* reset pmu hdmiphy control bit to disable hdmiphy */ +	regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, +			PMU_HDMI_PHY_ENABLE_BIT, 0); + +	regulator_bulk_disable(res->regul_count, res->regul_bulk); + +	pm_runtime_put_sync(hdata->dev); + +	mutex_lock(&hdata->hdmi_mutex); +	hdata->powered = false; + +out: +	mutex_unlock(&hdata->hdmi_mutex); +} + +static void hdmi_dpms(struct exynos_drm_display *display, int mode) +{ +	struct hdmi_context *hdata = display->ctx; +	struct drm_encoder *encoder = hdata->encoder; +	struct drm_crtc *crtc = encoder->crtc; +	struct drm_crtc_helper_funcs *funcs = NULL; + +	DRM_DEBUG_KMS("mode %d\n", mode); + +	switch (mode) { +	case DRM_MODE_DPMS_ON: +		hdmi_poweron(display); +		break; +	case DRM_MODE_DPMS_STANDBY: +	case DRM_MODE_DPMS_SUSPEND: +	case DRM_MODE_DPMS_OFF: +		/* +		 * The SFRs of VP and Mixer are updated by Vertical Sync of +		 * Timing generator which is a part of HDMI so the sequence +		 * to disable TV Subsystem should be as following, +		 *	VP -> Mixer -> HDMI +		 * +		 * Below codes will try to disable Mixer and VP(if used) +		 * prior to disabling HDMI. 
+		 */ +		if (crtc) +			funcs = crtc->helper_private; +		if (funcs && funcs->dpms) +			(*funcs->dpms)(crtc, mode); + +		hdmi_poweroff(display); +		break; +	default: +		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); +		break; +	} +} + +static struct exynos_drm_display_ops hdmi_display_ops = { +	.create_connector = hdmi_create_connector, +	.mode_fixup	= hdmi_mode_fixup, +	.mode_set	= hdmi_mode_set, +	.dpms		= hdmi_dpms, +	.commit		= hdmi_commit, +}; + +static struct exynos_drm_display hdmi_display = { +	.type = EXYNOS_DISPLAY_TYPE_HDMI, +	.ops = &hdmi_display_ops, +}; + +static void hdmi_hotplug_work_func(struct work_struct *work) +{ +	struct hdmi_context *hdata; + +	hdata = container_of(work, struct hdmi_context, hotplug_work.work); + +	mutex_lock(&hdata->hdmi_mutex); +	hdata->hpd = gpio_get_value(hdata->hpd_gpio); +	mutex_unlock(&hdata->hdmi_mutex); + +	if (hdata->drm_dev) +		drm_helper_hpd_irq_event(hdata->drm_dev); +} + +static irqreturn_t hdmi_irq_thread(int irq, void *arg) +{ +	struct hdmi_context *hdata = arg; + +	mod_delayed_work(system_wq, &hdata->hotplug_work, +			msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS)); + +	return IRQ_HANDLED; +} + +static int hdmi_resources_init(struct hdmi_context *hdata) +{ +	struct device *dev = hdata->dev; +	struct hdmi_resources *res = &hdata->res; +	static char *supply[] = { +		"hdmi-en", +		"vdd", +		"vdd_osc", +		"vdd_pll", +	}; +	int i, ret; + +	DRM_DEBUG_KMS("HDMI resource init\n"); + +	/* get clocks, power */ +	res->hdmi = devm_clk_get(dev, "hdmi"); +	if (IS_ERR(res->hdmi)) { +		DRM_ERROR("failed to get clock 'hdmi'\n"); +		ret = PTR_ERR(res->hdmi); +		goto fail; +	} +	res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); +	if (IS_ERR(res->sclk_hdmi)) { +		DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); +		ret = PTR_ERR(res->sclk_hdmi); +		goto fail; +	} +	res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); +	if (IS_ERR(res->sclk_pixel)) { +		DRM_ERROR("failed to get clock 'sclk_pixel'\n"); +		ret = PTR_ERR(res->sclk_pixel); +		goto 
fail; +	} +	res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); +	if (IS_ERR(res->sclk_hdmiphy)) { +		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); +		ret = PTR_ERR(res->sclk_hdmiphy); +		goto fail; +	} +	res->mout_hdmi = devm_clk_get(dev, "mout_hdmi"); +	if (IS_ERR(res->mout_hdmi)) { +		DRM_ERROR("failed to get clock 'mout_hdmi'\n"); +		ret = PTR_ERR(res->mout_hdmi); +		goto fail; +	} + +	clk_set_parent(res->mout_hdmi, res->sclk_pixel); + +	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * +		sizeof(res->regul_bulk[0]), GFP_KERNEL); +	if (!res->regul_bulk) { +		ret = -ENOMEM; +		goto fail; +	} +	for (i = 0; i < ARRAY_SIZE(supply); ++i) { +		res->regul_bulk[i].supply = supply[i]; +		res->regul_bulk[i].consumer = NULL; +	} +	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); +	if (ret) { +		DRM_ERROR("failed to get regulators\n"); +		return ret; +	} +	res->regul_count = ARRAY_SIZE(supply); + +	return ret; +fail: +	DRM_ERROR("HDMI resource init - failed\n"); +	return ret; +} + +static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata +					(struct device *dev) +{ +	struct device_node *np = dev->of_node; +	struct s5p_hdmi_platform_data *pd; +	u32 value; + +	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); +	if (!pd) +		goto err_data; + +	if (!of_find_property(np, "hpd-gpio", &value)) { +		DRM_ERROR("no hpd gpio property found\n"); +		goto err_data; +	} + +	pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0); + +	return pd; + +err_data: +	return NULL; +} + +static struct of_device_id hdmi_match_types[] = { +	{ +		.compatible = "samsung,exynos5-hdmi", +		.data = &exynos5_hdmi_driver_data, +	}, { +		.compatible = "samsung,exynos4212-hdmi", +		.data = &exynos4212_hdmi_driver_data, +	}, { +		.compatible = "samsung,exynos5420-hdmi", +		.data = &exynos5420_hdmi_driver_data, +	}, { +		/* end node */ +	} +}; + +static int hdmi_bind(struct device *dev, struct device *master, void *data) +{ +	struct drm_device *drm_dev = data; +	struct 
hdmi_context *hdata; + +	hdata = hdmi_display.ctx; +	hdata->drm_dev = drm_dev; + +	return exynos_drm_create_enc_conn(drm_dev, &hdmi_display); +} + +static void hdmi_unbind(struct device *dev, struct device *master, void *data) +{ +	struct exynos_drm_display *display = get_hdmi_display(dev); +	struct drm_encoder *encoder = display->encoder; +	struct hdmi_context *hdata = display->ctx; + +	encoder->funcs->destroy(encoder); +	drm_connector_cleanup(&hdata->connector); +} + +static const struct component_ops hdmi_component_ops = { +	.bind	= hdmi_bind, +	.unbind = hdmi_unbind, +}; + +static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev) +{ +	const char *compatible_str = "samsung,exynos4210-hdmiddc"; +	struct device_node *np; + +	np = of_find_compatible_node(NULL, NULL, compatible_str); +	if (np) +		return of_get_next_parent(np); + +	return NULL; +} + +static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) +{ +	const char *compatible_str = "samsung,exynos4212-hdmiphy"; + +	return of_find_compatible_node(NULL, NULL, compatible_str); +} + +static int hdmi_probe(struct platform_device *pdev) +{ +	struct device_node *ddc_node, *phy_node; +	struct s5p_hdmi_platform_data *pdata; +	struct hdmi_driver_data *drv_data; +	const struct of_device_id *match; +	struct device *dev = &pdev->dev; +	struct hdmi_context *hdata; +	struct resource *res; +	int ret; + +	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, +					hdmi_display.type); +	if (ret) +		return ret; + +	if (!dev->of_node) { +		ret = -ENODEV; +		goto err_del_component; +	} + +	pdata = drm_hdmi_dt_parse_pdata(dev); +	if (!pdata) { +		ret = -EINVAL; +		goto err_del_component; +	} + +	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); +	if (!hdata) { +		ret = -ENOMEM; +		goto err_del_component; +	} + +	mutex_init(&hdata->hdmi_mutex); + +	platform_set_drvdata(pdev, &hdmi_display); + +	match = of_match_node(hdmi_match_types, dev->of_node); +	if (!match) 
{ +		ret = -ENODEV; +		goto err_del_component; +	} + +	drv_data = (struct hdmi_driver_data *)match->data; +	hdata->type = drv_data->type; +	hdata->phy_confs = drv_data->phy_confs; +	hdata->phy_conf_count = drv_data->phy_conf_count; + +	hdata->hpd_gpio = pdata->hpd_gpio; +	hdata->dev = dev; + +	ret = hdmi_resources_init(hdata); +	if (ret) { +		DRM_ERROR("hdmi_resources_init failed\n"); +		return ret; +	} + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	hdata->regs = devm_ioremap_resource(dev, res); +	if (IS_ERR(hdata->regs)) { +		ret = PTR_ERR(hdata->regs); +		goto err_del_component; +	} + +	ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); +	if (ret) { +		DRM_ERROR("failed to request HPD gpio\n"); +		goto err_del_component; +	} + +	ddc_node = hdmi_legacy_ddc_dt_binding(dev); +	if (ddc_node) +		goto out_get_ddc_adpt; + +	/* DDC i2c driver */ +	ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); +	if (!ddc_node) { +		DRM_ERROR("Failed to find ddc node in device tree\n"); +		ret = -ENODEV; +		goto err_del_component; +	} + +out_get_ddc_adpt: +	hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); +	if (!hdata->ddc_adpt) { +		DRM_ERROR("Failed to get ddc i2c adapter by node\n"); +		return -EPROBE_DEFER; +	} + +	phy_node = hdmi_legacy_phy_dt_binding(dev); +	if (phy_node) +		goto out_get_phy_port; + +	/* hdmiphy i2c driver */ +	phy_node = of_parse_phandle(dev->of_node, "phy", 0); +	if (!phy_node) { +		DRM_ERROR("Failed to find hdmiphy node in device tree\n"); +		ret = -ENODEV; +		goto err_ddc; +	} + +out_get_phy_port: +	if (drv_data->is_apb_phy) { +		hdata->regs_hdmiphy = of_iomap(phy_node, 0); +		if (!hdata->regs_hdmiphy) { +			DRM_ERROR("failed to ioremap hdmi phy\n"); +			ret = -ENOMEM; +			goto err_ddc; +		} +	} else { +		hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); +		if (!hdata->hdmiphy_port) { +			DRM_ERROR("Failed to get hdmi phy i2c client\n"); +			ret = -EPROBE_DEFER; +			goto err_ddc; +		} +	} + +	hdata->irq = 
gpio_to_irq(hdata->hpd_gpio); +	if (hdata->irq < 0) { +		DRM_ERROR("failed to get GPIO irq\n"); +		ret = hdata->irq; +		goto err_hdmiphy; +	} + +	hdata->hpd = gpio_get_value(hdata->hpd_gpio); + +	INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); + +	ret = devm_request_threaded_irq(dev, hdata->irq, NULL, +			hdmi_irq_thread, IRQF_TRIGGER_RISING | +			IRQF_TRIGGER_FALLING | IRQF_ONESHOT, +			"hdmi", hdata); +	if (ret) { +		DRM_ERROR("failed to register hdmi interrupt\n"); +		goto err_hdmiphy; +	} + +	hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, +			"samsung,syscon-phandle"); +	if (IS_ERR(hdata->pmureg)) { +		DRM_ERROR("syscon regmap lookup failed.\n"); +		ret = -EPROBE_DEFER; +		goto err_hdmiphy; +	} + +	pm_runtime_enable(dev); +	hdmi_display.ctx = hdata; + +	ret = component_add(&pdev->dev, &hdmi_component_ops); +	if (ret) +		goto err_disable_pm_runtime; + +	return ret; + +err_disable_pm_runtime: +	pm_runtime_disable(dev); + +err_hdmiphy: +	if (hdata->hdmiphy_port) +		put_device(&hdata->hdmiphy_port->dev); +err_ddc: +	put_device(&hdata->ddc_adpt->dev); + +err_del_component: +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); + +	return ret; +} + +static int hdmi_remove(struct platform_device *pdev) +{ +	struct hdmi_context *hdata = hdmi_display.ctx; + +	cancel_delayed_work_sync(&hdata->hotplug_work); + +	put_device(&hdata->hdmiphy_port->dev); +	put_device(&hdata->ddc_adpt->dev); + +	pm_runtime_disable(&pdev->dev); +	component_del(&pdev->dev, &hdmi_component_ops); + +	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); +	return 0; +} + +struct platform_driver hdmi_driver = { +	.probe		= hdmi_probe, +	.remove		= hdmi_remove, +	.driver		= { +		.name	= "exynos-hdmi", +		.owner	= THIS_MODULE, +		.of_match_table = hdmi_match_types, +	}, +}; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c new file mode 100644 index 00000000000..7529946d0a7 --- /dev/null +++ 
b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -0,0 +1,1327 @@ +/* + * Copyright (C) 2011 Samsung Electronics Co.Ltd + * Authors: + * Seung-Woo Kim <sw0312.kim@samsung.com> + *	Inki Dae <inki.dae@samsung.com> + *	Joonyoung Shim <jy0922.shim@samsung.com> + * + * Based on drivers/media/video/s5p-tv/mixer_reg.c + * + * This program is free software; you can redistribute  it and/or modify it + * under  the terms of  the GNU General  Public License as published by the + * Free Software Foundation;  either version 2 of the  License, or (at your + * option) any later version. + * + */ + +#include <drm/drmP.h> + +#include "regs-mixer.h" +#include "regs-vp.h" + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/i2c.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/of.h> +#include <linux/component.h> + +#include <drm/exynos_drm.h> + +#include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" +#include "exynos_drm_iommu.h" +#include "exynos_mixer.h" + +#define get_mixer_manager(dev)	platform_get_drvdata(to_platform_device(dev)) + +#define MIXER_WIN_NR		3 +#define MIXER_DEFAULT_WIN	0 + +struct hdmi_win_data { +	dma_addr_t		dma_addr; +	dma_addr_t		chroma_dma_addr; +	uint32_t		pixel_format; +	unsigned int		bpp; +	unsigned int		crtc_x; +	unsigned int		crtc_y; +	unsigned int		crtc_width; +	unsigned int		crtc_height; +	unsigned int		fb_x; +	unsigned int		fb_y; +	unsigned int		fb_width; +	unsigned int		fb_height; +	unsigned int		src_width; +	unsigned int		src_height; +	unsigned int		mode_width; +	unsigned int		mode_height; +	unsigned int		scan_flags; +	bool			enabled; +	bool			resume; +}; + +struct mixer_resources { +	int			irq; +	void __iomem		*mixer_regs; +	void __iomem		*vp_regs; +	spinlock_t		reg_slock; +	struct clk		*mixer; +	struct clk		*vp; +	struct clk		
						*sclk_mixer;	/* continuation of the 'struct clk' declarator begun on the previous source line */
	struct clk		*sclk_hdmi;
	struct clk		*sclk_dac;
};

/* Mixer hardware revisions; gates version-specific register programming
 * (e.g. MXR_RESOLUTION on 128.0.0.184, layer-update on 16.0.33.0).
 */
enum mixer_version_id {
	MXR_VER_0_0_0_16,
	MXR_VER_16_0_33_0,
	MXR_VER_128_0_0_184,
};

/* Per-device driver state for the mixer CRTC. */
struct mixer_context {
	struct platform_device *pdev;
	struct device		*dev;
	struct drm_device	*drm_dev;
	int			pipe;		/* CRTC pipe index, assigned in mixer_initialize() */
	bool			interlace;	/* scan mode of the currently programmed timing */
	bool			powered;	/* protected by mixer_mutex */
	bool			vp_enabled;	/* SoC variant has a Video Processor block */
	u32			int_en;		/* MXR_INT_EN shadow, saved across power off */

	struct mutex		mixer_mutex;
	struct mixer_resources	mixer_res;
	struct hdmi_win_data	win_data[MIXER_WIN_NR];
	enum mixer_version_id	mxr_ver;
	wait_queue_head_t	wait_vsync_queue;
	atomic_t		wait_vsync_event;	/* 1 while a vblank wait is pending */
};

/* Static per-compatible configuration selected at bind time. */
struct mixer_drv_data {
	enum mixer_version_id	version;
	bool					is_vp_enabled;
};

/* Video Processor polyphase filter coefficients.  Negative entries rely on
 * the well-defined modulo conversion from int to u8 at initialization.
 */
static const u8 filter_y_horiz_tap8[] = {
	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
	0,	2,	4,	5,	6,	6,	6,	6,
	6,	5,	5,	4,	3,	2,	1,	1,
	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
	127,	126,	125,	121,	114,	107,	99,	89,
	79,	68,	57,	46,	35,	25,	16,	8,
};

static const u8 filter_y_vert_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
	0,	5,	11,	19,	27,	37,	48,	59,
	70,	81,	92,	102,	111,	118,	124,	126,
	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
};

static const u8 filter_cr_horiz_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
};

/* Raw accessor for a Video Processor register. */
static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
{
	return readl(res->vp_regs + reg_id);
}

static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
				 u32 val)
{
	writel(val, res->vp_regs + reg_id);
}

/* Read-modify-write: only bits set in 'mask' are taken from 'val'. */
static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
				 u32 val, u32 mask)
{
	u32 old = vp_reg_read(res, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, res->vp_regs + reg_id);
}

/* Raw accessor for a mixer register. */
static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
{
	return readl(res->mixer_regs + reg_id);
}

static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
				 u32 val)
{
	writel(val, res->mixer_regs + reg_id);
}

/* Read-modify-write: only bits set in 'mask' are taken from 'val'. */
static inline void mixer_reg_writemask(struct mixer_resources *res,
				 u32 reg_id, u32 val, u32 mask)
{
	u32 old = mixer_reg_read(res, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, res->mixer_regs + reg_id);
}

/* Dump the main mixer register file to the KMS debug log. */
static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
		(u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

/* Dump the Video Processor register file to the KMS debug log.
 * (The register list continues on the following source line.)
 */
static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
		(u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}

/* Load a VP polyphase coefficient table, packing 4 bytes per 32-bit write
 * (MSB first).  Table size must be a multiple of 4.
 */
static inline void vp_filter_set(struct mixer_resources *res,
		int reg_id, const u8 *data, unsigned int size)
{
	/* assure 4-byte align */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) |  (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_reg_write(res, reg_id, val);
	}
}

/* Program the default scaler filter banks into the Video Processor. */
static void vp_default_filter(struct mixer_resources *res)
{
	vp_filter_set(res, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
	vp_filter_set(res, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
	vp_filter_set(res, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}

/* Enable/disable vsync-synchronized shadow-register updates.  While
 * disabled, register writes take effect immediately rather than being
 * latched at vblank; callers toggle this around multi-register updates.
 */
static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
{
	struct mixer_resources *res = &ctx->mixer_res;

	/* block update on vsync */
	mixer_reg_writemask(res, MXR_STATUS, enable ?
			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);

	if (ctx->vp_enabled)
		vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
			VP_SHADOW_UPDATE_ENABLE : 0);
}

/* Program scan mode (interlace/progressive) and, on pre-128.0.0.184
 * hardware, the SD/HD class derived from the display height.
 */
static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	/* choosing between interlace and progressive mode */
	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
				MXR_CFG_SCAN_PROGRASSIVE);

	if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
		/* choosing between proper HD and SD mode */
		if (height <= 480)
			val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
		else if (height <= 576)
			val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
		else if (height <= 720)
			val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
		else if (height <= 1080)
			val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
		else
			val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	}

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}

/* Select the RGB<->YCbCr conversion setup by display height: BT.601
 * full-range for SD (480/576), BT.709 limited-range plus explicit color
 * matrix coefficients otherwise.
 *
 * NOTE(review): the 720, 1080 and default branches are identical, as are
 * the 480 and 576 branches; the chain could be collapsed without behavior
 * change.
 */
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	if (height == 480) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 576) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 720) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else if (height == 1080) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	}

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}

/* Enable or disable one mixer window; window 2 is the VP overlay and is
 * only touched when the SoC has a Video Processor.
 */
static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val = enable ? ~0 : 0;

	switch (win) {
	case 0:
		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
		break;
	case 1:
		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
		break;
	case 2:
		if (ctx->vp_enabled) {
			vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
			mixer_reg_writemask(res, MXR_CFG, val,
				MXR_CFG_VP_ENABLE);
		}
		break;
	}
}

/* Start the mixer engine and dump register state for debugging. */
static void mixer_run(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;

	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);

	mixer_regs_dump(ctx);
}

/* Stop the mixer engine and poll (up to ~200ms) for it to go idle. */
static void mixer_stop(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	int timeout = 20;

	mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
			--timeout)
		usleep_range(10000, 12000);

	mixer_regs_dump(ctx);
}

/* Program the Video Processor overlay (window 'win') from the previously
 * stored win_data: format, scaling ratios, buffer addresses.
 * (Function body continues on the following source line.)
 */
static void vp_video_buffer(struct mixer_context *ctx, int win)
{
	struct mixer_resources *res = &ctx->mixer_res;
	unsigned long flags;
	struct hdmi_win_data *win_data;
	unsigned int x_ratio, y_ratio;
	unsigned int buf_num = 1;
	dma_addr_t luma_addr[2], chroma_addr[2];
	bool tiled_mode = false;
	bool crcb_mode = false;
	u32 val;

	win_data = &ctx->win_data[win];

	switch (win_data->pixel_format) {
	case DRM_FORMAT_NV12MT:
		tiled_mode = true;
		/* fall through - NV12MT is tiled NV12 */
	case DRM_FORMAT_NV12:
		crcb_mode = false;
		buf_num = 2;
		break;
	/* TODO: single buffer format NV12, NV21 */
	default:
		/* ignore pixel format at disable time */
		if (!win_data->dma_addr)
			break;

		DRM_ERROR("pixel format for vp is wrong [%d].\n",
				win_data->pixel_format);
		return;
	}

	/* scaling feature: (src << 16) / dst */
	x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
	y_ratio = (win_data->src_height << 16) / win_data->crtc_height;

	if (buf_num == 2) {
		luma_addr[0] = win_data->dma_addr;
		chroma_addr[0] = win_data->chroma_dma_addr;
	} else {
		/* single-plane layout: chroma follows the luma plane */
		luma_addr[0] = win_data->dma_addr;
		chroma_addr[0] = win_data->dma_addr
			+ (win_data->fb_width * win_data->fb_height);
	}

	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
		ctx->interlace = true;
		/* second (bottom-field) pointers offset by one line/tile row */
		if (tiled_mode) {
			luma_addr[1] = luma_addr[0] + 0x40;
			chroma_addr[1] = chroma_addr[0] + 0x40;
		} else {
			luma_addr[1] = luma_addr[0] + win_data->fb_width;
			chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
		}
	} else {
		ctx->interlace = false;
		luma_addr[1] = 0;
		chroma_addr[1] = 0;
	}

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(ctx, false);

	/* interlace or progressive scan mode */
	val = (ctx->interlace ? ~0 : 0);
	vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);

	/* setup format */
	val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
	val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
		VP_IMG_VSIZE(win_data->fb_height));
	/* chroma height has to reduced by 2 to avoid chroma distortions */
	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
		VP_IMG_VSIZE(win_data->fb_height / 2));

	vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
	vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
	vp_reg_write(res, VP_SRC_H_POSITION,
			VP_SRC_H_POSITION_VAL(win_data->fb_x));
	vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);

	vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
	vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
	if (ctx->interlace) {
		/* per-field programming: halve vertical size/position */
		vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
		vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
	} else {
		vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
		vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
	}

	vp_reg_write(res, VP_H_RATIO, x_ratio);
	vp_reg_write(res, VP_V_RATIO, y_ratio);

	vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	/* set buffer address to vp */
	vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
	vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);

	mixer_cfg_scan(ctx, win_data->mode_height);
	mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
	mixer_cfg_layer(ctx, win, true);
	mixer_run(ctx);

	mixer_vsync_set_update(ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);

	vp_regs_dump(ctx);
}

/* Request the mixer to latch layer configuration at the next update point. */
static void mixer_layer_update(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;

	mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}

/* Program a graphics (RGB) window from the previously stored win_data:
 * format, geometry, source offset and scan-out address.
 */
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
{
	struct mixer_resources *res = &ctx->mixer_res;
	unsigned long flags;
	struct hdmi_win_data *win_data;
	unsigned int x_ratio, y_ratio;
	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
	dma_addr_t dma_addr;
	unsigned int fmt;
	u32 val;

	win_data = &ctx->win_data[win];

	/* hardware pixel-format codes for MXR_GRP_CFG */
	#define RGB565 4
	#define ARGB1555 5
	#define ARGB4444 6
	#define ARGB8888 7

	/* NOTE(review): every 16-bpp format is programmed as ARGB4444 here;
	 * the RGB565/ARGB1555 codes above are never selected.
	 */
	switch (win_data->bpp) {
	case 16:
		fmt = ARGB4444;
		break;
	case 32:
		fmt = ARGB8888;
		break;
	default:
		fmt = ARGB8888;
	}

	/* 2x scaling feature */
	x_ratio = 0;
	y_ratio = 0;

	dst_x_offset = win_data->crtc_x;
	dst_y_offset = win_data->crtc_y;

	/* converting dma address base and source offset */
	dma_addr = win_data->dma_addr
		+ (win_data->fb_x * win_data->bpp >> 3)
		+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
	src_x_offset = 0;
	src_y_offset = 0;

	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
		ctx->interlace = true;
	else
		ctx->interlace = false;

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(ctx, false);

	/* setup format */
	mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);

	/* setup display size */
	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
		win == MIXER_DEFAULT_WIN) {
		val  = MXR_MXR_RES_HEIGHT(win_data->fb_height);
		val |= MXR_MXR_RES_WIDTH(win_data->fb_width);
		mixer_reg_write(res, MXR_RESOLUTION, val);
	}

	val  = MXR_GRP_WH_WIDTH(win_data->crtc_width);
	val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
	val |= MXR_GRP_WH_H_SCALE(x_ratio);
	val |= MXR_GRP_WH_V_SCALE(y_ratio);
	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);

	/* setup offsets in source image */
	val  = MXR_GRP_SXY_SX(src_x_offset);
	val |= MXR_GRP_SXY_SY(src_y_offset);
	mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);

	/* setup offsets in display image */
	val  = MXR_GRP_DXY_DX(dst_x_offset);
	val |= MXR_GRP_DXY_DY(dst_y_offset);
	mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);

	/* set buffer address to mixer */
	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);

	mixer_cfg_scan(ctx, win_data->mode_height);
	mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
	mixer_cfg_layer(ctx, win, true);

	/* layer update mandatory for mixer 16.0.33.0 */
	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
		ctx->mxr_ver == MXR_VER_128_0_0_184)
		mixer_layer_update(ctx);

	mixer_run(ctx);

	mixer_vsync_set_update(ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);
}

/* Soft-reset the Video Processor, polling up to ~1s for completion. */
static void vp_win_reset(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	int tries = 100;

	vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* waiting until VP_SRESET_PROCESSING is 0 */
		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		usleep_range(10000, 12000);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}

static void
mixer_win_reset(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(ctx, false);

	/* route mixer output to the HDMI block */
	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);

	/* set output in RGB888 mode */
	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);

	/* 16 beat burst in DMA */
	mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > layer0 > video
	 * because typical usage scenario would be
	 * layer1 - OSD
	 * layer0 - framebuffer
	 * video - video overlay
	 */
	val = MXR_LAYER_CFG_GRP1_VAL(3);
	val |= MXR_LAYER_CFG_GRP0_VAL(2);
	if (ctx->vp_enabled)
		val |= MXR_LAYER_CFG_VP_VAL(1);
	mixer_reg_write(res, MXR_LAYER_CFG, val);

	/* setting background color (black in YCbCr: Y=0, Cb=Cr=0x80) */
	mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
	mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);

	/* setting graphical layers */
	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_WIN_BLEND_EN;
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* Don't blend layer 0 onto the mixer background */
	mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);

	/* Blend layer 1 into layer 0 */
	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
	mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);

	/* setting video layers */
	val = MXR_GRP_CFG_ALPHA_VAL(0);
	mixer_reg_write(res, MXR_VIDEO_CFG, val);

	if (ctx->vp_enabled) {
		/* configuration of Video Processor Registers */
		vp_win_reset(ctx);
		vp_default_filter(res);
	}

	/* disable all layers */
	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
	if (ctx->vp_enabled)
		mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);

	mixer_vsync_set_update(ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);
}

/* Mixer interrupt handler: delivers vblank/pageflip events on VSYNC.
 * For interlaced output the shadow base registers are compared against the
 * programmed ones so the event is only delivered once the flip has actually
 * latched.  (Handler body continues on the following source line.)
 */
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
	struct mixer_context *ctx = arg;
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val, base, shadow;

	spin_lock(&res->reg_slock);

	/* read interrupt status for handling and clearing flags for VSYNC */
	val = mixer_reg_read(res, MXR_INT_STATUS);

	/* handling VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		/* interlace scan need to check shadow register */
		if (ctx->interlace) {
			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
			if (base != shadow)
				goto out;

			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
			if (base != shadow)
				goto out;
		}

		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);

		/* set wait vsync event to zero and wake up queue.
*/ +		if (atomic_read(&ctx->wait_vsync_event)) { +			atomic_set(&ctx->wait_vsync_event, 0); +			wake_up(&ctx->wait_vsync_queue); +		} +	} + +out: +	/* clear interrupts */ +	if (~val & MXR_INT_EN_VSYNC) { +		/* vsync interrupt use different bit for read and clear */ +		val &= ~MXR_INT_EN_VSYNC; +		val |= MXR_INT_CLEAR_VSYNC; +	} +	mixer_reg_write(res, MXR_INT_STATUS, val); + +	spin_unlock(&res->reg_slock); + +	return IRQ_HANDLED; +} + +static int mixer_resources_init(struct mixer_context *mixer_ctx) +{ +	struct device *dev = &mixer_ctx->pdev->dev; +	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; +	struct resource *res; +	int ret; + +	spin_lock_init(&mixer_res->reg_slock); + +	mixer_res->mixer = devm_clk_get(dev, "mixer"); +	if (IS_ERR(mixer_res->mixer)) { +		dev_err(dev, "failed to get clock 'mixer'\n"); +		return -ENODEV; +	} + +	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); +	if (IS_ERR(mixer_res->sclk_hdmi)) { +		dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); +		return -ENODEV; +	} +	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0); +	if (res == NULL) { +		dev_err(dev, "get memory resource failed.\n"); +		return -ENXIO; +	} + +	mixer_res->mixer_regs = devm_ioremap(dev, res->start, +							resource_size(res)); +	if (mixer_res->mixer_regs == NULL) { +		dev_err(dev, "register mapping failed.\n"); +		return -ENXIO; +	} + +	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0); +	if (res == NULL) { +		dev_err(dev, "get interrupt resource failed.\n"); +		return -ENXIO; +	} + +	ret = devm_request_irq(dev, res->start, mixer_irq_handler, +						0, "drm_mixer", mixer_ctx); +	if (ret) { +		dev_err(dev, "request interrupt failed.\n"); +		return ret; +	} +	mixer_res->irq = res->start; + +	return 0; +} + +static int vp_resources_init(struct mixer_context *mixer_ctx) +{ +	struct device *dev = &mixer_ctx->pdev->dev; +	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; +	struct resource *res; + +	mixer_res->vp = 
						devm_clk_get(dev, "vp");
	if (IS_ERR(mixer_res->vp)) {
		dev_err(dev, "failed to get clock 'vp'\n");
		/* NOTE(review): returning -ENODEV loses -EPROBE_DEFER from
		 * devm_clk_get(); PTR_ERR() would be the safer choice.
		 */
		return -ENODEV;
	}
	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
	if (IS_ERR(mixer_res->sclk_mixer)) {
		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
		return -ENODEV;
	}
	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
	if (IS_ERR(mixer_res->sclk_dac)) {
		dev_err(dev, "failed to get clock 'sclk_dac'\n");
		return -ENODEV;
	}

	/* NOTE(review): sclk_hdmi was already validated with IS_ERR() in
	 * mixer_resources_init(), so this check is always true here.
	 */
	if (mixer_res->sclk_hdmi)
		clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);

	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		return -ENXIO;
	}

	mixer_res->vp_regs = devm_ioremap(dev, res->start,
							resource_size(res));
	if (mixer_res->vp_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		return -ENXIO;
	}

	return 0;
}

/* Bind-time initialization: assign a CRTC pipe, acquire mixer (and, when
 * present, VP) resources and attach the device to the DRM IOMMU mapping.
 * Returns 0 on success or a negative errno.
 */
static int mixer_initialize(struct exynos_drm_manager *mgr,
			struct drm_device *drm_dev)
{
	int ret;
	struct mixer_context *mixer_ctx = mgr->ctx;
	struct exynos_drm_private *priv;
	priv = drm_dev->dev_private;

	mgr->drm_dev = mixer_ctx->drm_dev = drm_dev;
	mgr->pipe = mixer_ctx->pipe = priv->pipe++;

	/* acquire resources: regs, irqs, clocks */
	ret = mixer_resources_init(mixer_ctx);
	if (ret) {
		DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
		return ret;
	}

	if (mixer_ctx->vp_enabled) {
		/* acquire vp resources: regs, irqs, clocks */
		ret = vp_resources_init(mixer_ctx);
		if (ret) {
			DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
			return ret;
		}
	}

	if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
		return 0;

	return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}

/* Undo mixer_initialize()'s IOMMU attachment. */
static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
{
	struct mixer_context *mixer_ctx = mgr->ctx;

	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}

/* Enable the VSYNC interrupt; when powered down, only record the request
 * in int_en so mixer_poweron() can restore it.
 * NOTE(review): 'powered' is read here without mixer_mutex - verify this
 * matches the locking expectations of the callers.
 */
static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
{
	struct mixer_context *mixer_ctx = mgr->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;

	if (!mixer_ctx->powered) {
		mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
		return 0;
	}

	/* enable vsync interrupt */
	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
			MXR_INT_EN_VSYNC);

	return 0;
}

/* Disable the VSYNC interrupt. */
static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
{
	struct mixer_context *mixer_ctx = mgr->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;

	/* disable vsync interrupt */
	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}

/* Stash overlay parameters into win_data; the hardware is only programmed
 * later, by mixer_win_commit().  (Body continues on the next source line.)
 */
static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
			struct exynos_drm_overlay *overlay)
{
	struct mixer_context *mixer_ctx = mgr->ctx;
	struct hdmi_win_data *win_data;
	int win;

	if (!overlay) {
		DRM_ERROR("overlay is NULL\n");
		return;
	}

	DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
				 overlay->fb_width, overlay->fb_height,
				 overlay->fb_x, overlay->fb_y,
				 overlay->crtc_width, overlay->crtc_height,
				 overlay->crtc_x, overlay->crtc_y);

	win = overlay->zpos;
	if (win == DEFAULT_ZPOS)
		win = MIXER_DEFAULT_WIN;

	if (win < 0 || win >= MIXER_WIN_NR) {
		DRM_ERROR("mixer window[%d] is wrong\n", win);
		return;
	}

	win_data = &mixer_ctx->win_data[win];

	win_data->dma_addr = overlay->dma_addr[0];
	win_data->chroma_dma_addr = overlay->dma_addr[1];
	win_data->pixel_format = overlay->pixel_format;
	win_data->bpp = overlay->bpp;

	win_data->crtc_x = overlay->crtc_x;
	win_data->crtc_y = overlay->crtc_y;
	win_data->crtc_width = overlay->crtc_width;
	win_data->crtc_height = overlay->crtc_height;

	win_data->fb_x = overlay->fb_x;
	win_data->fb_y = overlay->fb_y;
	win_data->fb_width = overlay->fb_width;
	win_data->fb_height = overlay->fb_height;
	win_data->src_width = overlay->src_width;
	win_data->src_height =
						overlay->src_height;

	win_data->mode_width = overlay->mode_width;
	win_data->mode_height = overlay->mode_height;

	win_data->scan_flags = overlay->scan_flag;
}

/* Push the stored win_data for window 'zpos' into the hardware.
 * NOTE(review): unlike mixer_win_mode_set(), 'win' is not range-checked
 * here before indexing win_data[] - callers are trusted to have gone
 * through mode_set first.
 */
static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
{
	struct mixer_context *mixer_ctx = mgr->ctx;
	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;

	DRM_DEBUG_KMS("win: %d\n", win);

	mutex_lock(&mixer_ctx->mixer_mutex);
	if (!mixer_ctx->powered) {
		mutex_unlock(&mixer_ctx->mixer_mutex);
		return;
	}
	mutex_unlock(&mixer_ctx->mixer_mutex);

	/* window 2 is the video overlay when a VP is present */
	if (win > 1 && mixer_ctx->vp_enabled)
		vp_video_buffer(mixer_ctx, win);
	else
		mixer_graph_buffer(mixer_ctx, win);

	mixer_ctx->win_data[win].enabled = true;
}

/* Disable window 'zpos'; while powered down, only clear its resume flag. */
static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
{
	struct mixer_context *mixer_ctx = mgr->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;
	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
	unsigned long flags;

	DRM_DEBUG_KMS("win: %d\n", win);

	mutex_lock(&mixer_ctx->mixer_mutex);
	if (!mixer_ctx->powered) {
		mutex_unlock(&mixer_ctx->mixer_mutex);
		mixer_ctx->win_data[win].resume = false;
		return;
	}
	mutex_unlock(&mixer_ctx->mixer_mutex);

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_vsync_set_update(mixer_ctx, false);

	mixer_cfg_layer(mixer_ctx, win, false);

	mixer_vsync_set_update(mixer_ctx, true);
	spin_unlock_irqrestore(&res->reg_slock, flags);

	mixer_ctx->win_data[win].enabled = false;
}

/* Block until the next VSYNC interrupt (or a 50ms timeout) while holding a
 * vblank reference; no-op when the mixer is powered down.
 */
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
	struct mixer_context *mixer_ctx = mgr->ctx;

	mutex_lock(&mixer_ctx->mixer_mutex);
	if (!mixer_ctx->powered) {
		mutex_unlock(&mixer_ctx->mixer_mutex);
		return;
	}
	mutex_unlock(&mixer_ctx->mixer_mutex);

	drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);

	atomic_set(&mixer_ctx->wait_vsync_event, 1);

	/*
	 * wait for MIXER to signal VSYNC interrupt or return after
	 * timeout which is set to 50ms (refresh rate of 20).
	 */
	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
				!atomic_read(&mixer_ctx->wait_vsync_event),
				HZ/20))
		DRM_DEBUG_KMS("vblank wait timed out.\n");

	drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
}

/* Remember which windows were enabled, then disable them all (power off). */
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
{
	struct mixer_context *ctx = mgr->ctx;
	struct hdmi_win_data *win_data;
	int i;

	for (i = 0; i < MIXER_WIN_NR; i++) {
		win_data = &ctx->win_data[i];
		win_data->resume = win_data->enabled;
		mixer_win_disable(mgr, i);
	}
	mixer_wait_for_vblank(mgr);
}

/* Re-commit every window that was enabled before suspend. */
static void mixer_window_resume(struct exynos_drm_manager *mgr)
{
	struct mixer_context *ctx = mgr->ctx;
	struct hdmi_win_data *win_data;
	int i;

	for (i = 0; i < MIXER_WIN_NR; i++) {
		win_data = &ctx->win_data[i];
		win_data->enabled = win_data->resume;
		win_data->resume = false;
		if (win_data->enabled)
			mixer_win_commit(mgr, i);
	}
}

/* Power-up path: runtime-PM get, enable clocks, soft-reset the mixer,
 * restore the saved interrupt mask and re-enable the suspended windows.
 */
static void mixer_poweron(struct exynos_drm_manager *mgr)
{
	struct mixer_context *ctx = mgr->ctx;
	struct mixer_resources *res = &ctx->mixer_res;

	mutex_lock(&ctx->mixer_mutex);
	if (ctx->powered) {
		mutex_unlock(&ctx->mixer_mutex);
		return;
	}

	mutex_unlock(&ctx->mixer_mutex);

	pm_runtime_get_sync(ctx->dev);

	clk_prepare_enable(res->mixer);
	if (ctx->vp_enabled) {
		clk_prepare_enable(res->vp);
		clk_prepare_enable(res->sclk_mixer);
	}

	mutex_lock(&ctx->mixer_mutex);
	ctx->powered = true;
	mutex_unlock(&ctx->mixer_mutex);

	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);

	/* restore the interrupt mask saved (or accumulated) while off */
	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
	mixer_win_reset(ctx);

	mixer_window_resume(mgr);
}

/* Power-down path: stop the engine, park the windows, save the interrupt
 * mask, then release clocks and the runtime-PM reference.
 * (Body continues on the following source line.)
 */
static void mixer_poweroff(struct exynos_drm_manager *mgr)
{
	struct mixer_context *ctx = mgr->ctx;
	struct mixer_resources *res = &ctx->mixer_res;

	mutex_lock(&ctx->mixer_mutex);
	if (!ctx->powered) {
		mutex_unlock(&ctx->mixer_mutex);
		return;
	}
	mutex_unlock(&ctx->mixer_mutex);
	mixer_stop(ctx);
	mixer_window_suspend(mgr);

	/* preserve the interrupt mask so mixer_poweron() can restore it */
	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);

	mutex_lock(&ctx->mixer_mutex);
	ctx->powered = false;
	mutex_unlock(&ctx->mixer_mutex);

	clk_disable_unprepare(res->mixer);
	if (ctx->vp_enabled) {
		clk_disable_unprepare(res->vp);
		clk_disable_unprepare(res->sclk_mixer);
	}

	pm_runtime_put_sync(ctx->dev);
}

/* DPMS entry point: ON powers the mixer up, every other state powers it
 * down (the hardware has no intermediate standby states).
 */
static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
{
	switch (mode) {
	case DRM_MODE_DPMS_ON:
		mixer_poweron(mgr);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		mixer_poweroff(mgr);
		break;
	default:
		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
		break;
	}
}

/* Only valid for Mixer version 16.0.33.0 */
int mixer_check_mode(struct drm_display_mode *mode)
{
	u32 w, h;

	w = mode->hdisplay;
	h = mode->vdisplay;

	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
		mode->hdisplay, mode->vdisplay, mode->vrefresh,
		(mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);

	/* accept only resolution windows around 576p/720p/1080p classes */
	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
		(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
		(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
		return 0;

	return -EINVAL;
}

/* CRTC manager callbacks exposed to the exynos DRM core. */
static struct exynos_drm_manager_ops mixer_manager_ops = {
	.dpms			= mixer_dpms,
	.enable_vblank		= mixer_enable_vblank,
	.disable_vblank		= mixer_disable_vblank,
	.wait_for_vblank	= mixer_wait_for_vblank,
	.win_mode_set		= mixer_win_mode_set,
	.win_commit		= mixer_win_commit,
	.win_disable		= mixer_win_disable,
};

static struct exynos_drm_manager mixer_manager = {
	.type			= EXYNOS_DISPLAY_TYPE_HDMI,
	.ops			= &mixer_manager_ops,
};

/* Per-SoC configuration blobs referenced by the match tables below. */
static struct mixer_drv_data exynos5420_mxr_drv_data = {
	.version = MXR_VER_128_0_0_184,
	.is_vp_enabled = 0,
};

static struct mixer_drv_data exynos5250_mxr_drv_data = {
	.version = MXR_VER_16_0_33_0,
	.is_vp_enabled = 0,
};

static struct mixer_drv_data exynos4210_mxr_drv_data = {
	.version = MXR_VER_0_0_0_16,
	.is_vp_enabled = 1,
};

/* Legacy (non-DT) platform-device ids. */
static struct platform_device_id mixer_driver_types[] = {
	{
		.name		= "s5p-mixer",
		.driver_data	= (unsigned long)&exynos4210_mxr_drv_data,
	}, {
		.name		= "exynos5-mixer",
		.driver_data	= (unsigned long)&exynos5250_mxr_drv_data,
	}, {
		/* end node */
	}
};

/* Devicetree match table; "samsung,exynos5-mixer" is the deprecated alias
 * for the 5250 variant.
 */
static struct of_device_id mixer_match_types[] = {
	{
		.compatible = "samsung,exynos5-mixer",
		.data	= &exynos5250_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos5250-mixer",
		.data	= &exynos5250_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos5420-mixer",
		.data	= &exynos5420_mxr_drv_data,
	}, {
		/* end node */
	}
};

/* Component bind callback: allocate the context, pick the per-SoC driver
 * data (DT or legacy id), initialize resources and register the CRTC.
 */
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm_dev = data;
	struct mixer_context *ctx;
	struct mixer_drv_data *drv;
	int ret;

	dev_info(dev, "probe start\n");

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		DRM_ERROR("failed to alloc mixer context.\n");
		return -ENOMEM;
	}

	mutex_init(&ctx->mixer_mutex);

	if (dev->of_node) {
		const struct of_device_id *match;
		match = of_match_node(mixer_match_types, dev->of_node);
		drv = (struct mixer_drv_data *)match->data;
	} else {
		drv = (struct mixer_drv_data *)
			platform_get_device_id(pdev)->driver_data;
	}

	ctx->pdev = pdev;
	ctx->dev = dev;
	ctx->vp_enabled = drv->is_vp_enabled;
	ctx->mxr_ver = drv->version;
	init_waitqueue_head(&ctx->wait_vsync_queue);
	atomic_set(&ctx->wait_vsync_event, 0);

	mixer_manager.ctx = ctx;
	ret = mixer_initialize(&mixer_manager, drm_dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, &mixer_manager);
	ret = exynos_drm_crtc_create(&mixer_manager);
	if (ret) {
		mixer_mgr_remove(&mixer_manager);
		return ret;
	}

	pm_runtime_enable(dev);

	return 0;
}

/* Component unbind callback: tear down the CRTC and IOMMU attachment. */
static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
	struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
	struct drm_crtc *crtc = mgr->crtc;

	dev_info(dev, "remove successful\n");

	mixer_mgr_remove(mgr);

	pm_runtime_disable(dev);

	crtc->funcs->destroy(crtc);
}

static const struct component_ops mixer_component_ops = {
	.bind	= mixer_bind,
	.unbind	= mixer_unbind,
};

/* Platform probe: register with the exynos component framework; the real
 * initialization happens in mixer_bind().
 */
static int mixer_probe(struct platform_device *pdev)
{
	int ret;

	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
					mixer_manager.type);
	if (ret)
		return ret;

	ret = component_add(&pdev->dev, &mixer_component_ops);
	if (ret)
		exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);

	return ret;
}

static int mixer_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &mixer_component_ops);
	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);

	return 0;
}

struct platform_driver mixer_driver = {
	.driver = {
		.name = "exynos-mixer",
		.owner = THIS_MODULE,
		.of_match_table =
mixer_match_types, +	}, +	.probe = mixer_probe, +	.remove = mixer_remove, +	.id_table	= mixer_driver_types, +}; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h new file mode 100644 index 00000000000..3811e417f0e --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_mixer.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#ifndef _EXYNOS_MIXER_H_ +#define _EXYNOS_MIXER_H_ + +/* This function returns 0 if the given timing is valid for the mixer */ +int mixer_check_mode(struct drm_display_mode *mode); + +#endif diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h new file mode 100644 index 00000000000..30496134a3d --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-fimc.h @@ -0,0 +1,668 @@ +/* drivers/gpu/drm/exynos/regs-fimc.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + *		http://www.samsung.com/ + * + * Register definition file for Samsung Camera Interface (FIMC) driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef EXYNOS_REGS_FIMC_H +#define EXYNOS_REGS_FIMC_H + +/* + * Register part +*/ +/* Input source format */ +#define EXYNOS_CISRCFMT		(0x00) +/* Window offset */ +#define EXYNOS_CIWDOFST		(0x04) +/* Global control */ +#define EXYNOS_CIGCTRL		(0x08) +/* Window offset 2 */ +#define EXYNOS_CIWDOFST2	(0x14) +/* Y 1st frame start address for output DMA */ +#define EXYNOS_CIOYSA1		(0x18) +/* Y 2nd frame start address for output DMA */ +#define EXYNOS_CIOYSA2		(0x1c) +/* Y 3rd frame start address for output DMA */ +#define EXYNOS_CIOYSA3		(0x20) +/* Y 4th frame start address for output DMA */ +#define EXYNOS_CIOYSA4		(0x24) +/* Cb 1st frame start address for output DMA */ +#define EXYNOS_CIOCBSA1		(0x28) +/* Cb 2nd frame start address for output DMA */ +#define EXYNOS_CIOCBSA2		(0x2c) +/* Cb 3rd frame start address for output DMA */ +#define EXYNOS_CIOCBSA3		(0x30) +/* Cb 4th frame start address for output DMA */ +#define EXYNOS_CIOCBSA4		(0x34) +/* Cr 1st frame start address for output DMA */ +#define EXYNOS_CIOCRSA1		(0x38) +/* Cr 2nd frame start address for output DMA */ +#define EXYNOS_CIOCRSA2		(0x3c) +/* Cr 3rd frame start address for output DMA */ +#define EXYNOS_CIOCRSA3		(0x40) +/* Cr 4th frame start address for output DMA */ +#define EXYNOS_CIOCRSA4		(0x44) +/* Target image format */ +#define EXYNOS_CITRGFMT		(0x48) +/* Output DMA control */ +#define EXYNOS_CIOCTRL		(0x4c) +/* Pre-scaler control 1 */ +#define EXYNOS_CISCPRERATIO	(0x50) +/* Pre-scaler control 2 */ +#define EXYNOS_CISCPREDST		(0x54) +/* Main scaler control */ +#define EXYNOS_CISCCTRL		(0x58) +/* Target area */ +#define EXYNOS_CITAREA		(0x5c) +/* Status */ +#define EXYNOS_CISTATUS		(0x64) +/* Status2 */ +#define EXYNOS_CISTATUS2		(0x68) +/* Image capture enable command */ +#define EXYNOS_CIIMGCPT		(0xc0) +/* Capture sequence */ +#define EXYNOS_CICPTSEQ		(0xc4) +/* Image effects */ +#define EXYNOS_CIIMGEFF		(0xd0) +/* Y frame start address for input DMA */ +#define EXYNOS_CIIYSA0		(0xd4) 
+/* Cb frame start address for input DMA */ +#define EXYNOS_CIICBSA0		(0xd8) +/* Cr frame start address for input DMA */ +#define EXYNOS_CIICRSA0		(0xdc) +/* Input DMA Y Line Skip */ +#define EXYNOS_CIILINESKIP_Y	(0xec) +/* Input DMA Cb Line Skip */ +#define EXYNOS_CIILINESKIP_CB	(0xf0) +/* Input DMA Cr Line Skip */ +#define EXYNOS_CIILINESKIP_CR	(0xf4) +/* Real input DMA image size */ +#define EXYNOS_CIREAL_ISIZE	(0xf8) +/* Input DMA control */ +#define EXYNOS_MSCTRL		(0xfc) +/* Y frame start address for input DMA */ +#define EXYNOS_CIIYSA1		(0x144) +/* Cb frame start address for input DMA */ +#define EXYNOS_CIICBSA1		(0x148) +/* Cr frame start address for input DMA */ +#define EXYNOS_CIICRSA1		(0x14c) +/* Output DMA Y offset */ +#define EXYNOS_CIOYOFF		(0x168) +/* Output DMA CB offset */ +#define EXYNOS_CIOCBOFF		(0x16c) +/* Output DMA CR offset */ +#define EXYNOS_CIOCROFF		(0x170) +/* Input DMA Y offset */ +#define EXYNOS_CIIYOFF		(0x174) +/* Input DMA CB offset */ +#define EXYNOS_CIICBOFF		(0x178) +/* Input DMA CR offset */ +#define EXYNOS_CIICROFF		(0x17c) +/* Input DMA original image size */ +#define EXYNOS_ORGISIZE		(0x180) +/* Output DMA original image size */ +#define EXYNOS_ORGOSIZE		(0x184) +/* Real output DMA image size */ +#define EXYNOS_CIEXTEN		(0x188) +/* DMA parameter */ +#define EXYNOS_CIDMAPARAM		(0x18c) +/* MIPI CSI image format */ +#define EXYNOS_CSIIMGFMT		(0x194) +/* FIMC Clock Source Select */ +#define EXYNOS_MISC_FIMC		(0x198) + +/* Add for FIMC v5.1 */ +/* Output Frame Buffer Sequence */ +#define EXYNOS_CIFCNTSEQ		(0x1fc) +/* Y 5th frame start address for output DMA */ +#define EXYNOS_CIOYSA5		(0x200) +/* Y 6th frame start address for output DMA */ +#define EXYNOS_CIOYSA6		(0x204) +/* Y 7th frame start address for output DMA */ +#define EXYNOS_CIOYSA7		(0x208) +/* Y 8th frame start address for output DMA */ +#define EXYNOS_CIOYSA8		(0x20c) +/* Y 9th frame start address for output DMA */ +#define EXYNOS_CIOYSA9		(0x210) +/* Y 10th frame 
start address for output DMA */ +#define EXYNOS_CIOYSA10		(0x214) +/* Y 11th frame start address for output DMA */ +#define EXYNOS_CIOYSA11		(0x218) +/* Y 12th frame start address for output DMA */ +#define EXYNOS_CIOYSA12		(0x21c) +/* Y 13th frame start address for output DMA */ +#define EXYNOS_CIOYSA13		(0x220) +/* Y 14th frame start address for output DMA */ +#define EXYNOS_CIOYSA14		(0x224) +/* Y 15th frame start address for output DMA */ +#define EXYNOS_CIOYSA15		(0x228) +/* Y 16th frame start address for output DMA */ +#define EXYNOS_CIOYSA16		(0x22c) +/* Y 17th frame start address for output DMA */ +#define EXYNOS_CIOYSA17		(0x230) +/* Y 18th frame start address for output DMA */ +#define EXYNOS_CIOYSA18		(0x234) +/* Y 19th frame start address for output DMA */ +#define EXYNOS_CIOYSA19		(0x238) +/* Y 20th frame start address for output DMA */ +#define EXYNOS_CIOYSA20		(0x23c) +/* Y 21th frame start address for output DMA */ +#define EXYNOS_CIOYSA21		(0x240) +/* Y 22th frame start address for output DMA */ +#define EXYNOS_CIOYSA22		(0x244) +/* Y 23th frame start address for output DMA */ +#define EXYNOS_CIOYSA23		(0x248) +/* Y 24th frame start address for output DMA */ +#define EXYNOS_CIOYSA24		(0x24c) +/* Y 25th frame start address for output DMA */ +#define EXYNOS_CIOYSA25		(0x250) +/* Y 26th frame start address for output DMA */ +#define EXYNOS_CIOYSA26		(0x254) +/* Y 27th frame start address for output DMA */ +#define EXYNOS_CIOYSA27		(0x258) +/* Y 28th frame start address for output DMA */ +#define EXYNOS_CIOYSA28		(0x25c) +/* Y 29th frame start address for output DMA */ +#define EXYNOS_CIOYSA29		(0x260) +/* Y 30th frame start address for output DMA */ +#define EXYNOS_CIOYSA30		(0x264) +/* Y 31th frame start address for output DMA */ +#define EXYNOS_CIOYSA31		(0x268) +/* Y 32th frame start address for output DMA */ +#define EXYNOS_CIOYSA32		(0x26c) + +/* CB 5th frame start address for output DMA */ +#define EXYNOS_CIOCBSA5		(0x270) +/* CB 6th frame start 
address for output DMA */ +#define EXYNOS_CIOCBSA6		(0x274) +/* CB 7th frame start address for output DMA */ +#define EXYNOS_CIOCBSA7		(0x278) +/* CB 8th frame start address for output DMA */ +#define EXYNOS_CIOCBSA8		(0x27c) +/* CB 9th frame start address for output DMA */ +#define EXYNOS_CIOCBSA9		(0x280) +/* CB 10th frame start address for output DMA */ +#define EXYNOS_CIOCBSA10		(0x284) +/* CB 11th frame start address for output DMA */ +#define EXYNOS_CIOCBSA11		(0x288) +/* CB 12th frame start address for output DMA */ +#define EXYNOS_CIOCBSA12		(0x28c) +/* CB 13th frame start address for output DMA */ +#define EXYNOS_CIOCBSA13		(0x290) +/* CB 14th frame start address for output DMA */ +#define EXYNOS_CIOCBSA14		(0x294) +/* CB 15th frame start address for output DMA */ +#define EXYNOS_CIOCBSA15		(0x298) +/* CB 16th frame start address for output DMA */ +#define EXYNOS_CIOCBSA16		(0x29c) +/* CB 17th frame start address for output DMA */ +#define EXYNOS_CIOCBSA17		(0x2a0) +/* CB 18th frame start address for output DMA */ +#define EXYNOS_CIOCBSA18		(0x2a4) +/* CB 19th frame start address for output DMA */ +#define EXYNOS_CIOCBSA19		(0x2a8) +/* CB 20th frame start address for output DMA */ +#define EXYNOS_CIOCBSA20		(0x2ac) +/* CB 21th frame start address for output DMA */ +#define EXYNOS_CIOCBSA21		(0x2b0) +/* CB 22th frame start address for output DMA */ +#define EXYNOS_CIOCBSA22		(0x2b4) +/* CB 23th frame start address for output DMA */ +#define EXYNOS_CIOCBSA23		(0x2b8) +/* CB 24th frame start address for output DMA */ +#define EXYNOS_CIOCBSA24		(0x2bc) +/* CB 25th frame start address for output DMA */ +#define EXYNOS_CIOCBSA25		(0x2c0) +/* CB 26th frame start address for output DMA */ +#define EXYNOS_CIOCBSA26		(0x2c4) +/* CB 27th frame start address for output DMA */ +#define EXYNOS_CIOCBSA27		(0x2c8) +/* CB 28th frame start address for output DMA */ +#define EXYNOS_CIOCBSA28		(0x2cc) +/* CB 29th frame start address for output DMA */ +#define EXYNOS_CIOCBSA29	
	(0x2d0) +/* CB 30th frame start address for output DMA */ +#define EXYNOS_CIOCBSA30		(0x2d4) +/* CB 31th frame start address for output DMA */ +#define EXYNOS_CIOCBSA31		(0x2d8) +/* CB 32th frame start address for output DMA */ +#define EXYNOS_CIOCBSA32		(0x2dc) + +/* CR 5th frame start address for output DMA */ +#define EXYNOS_CIOCRSA5		(0x2e0) +/* CR 6th frame start address for output DMA */ +#define EXYNOS_CIOCRSA6		(0x2e4) +/* CR 7th frame start address for output DMA */ +#define EXYNOS_CIOCRSA7		(0x2e8) +/* CR 8th frame start address for output DMA */ +#define EXYNOS_CIOCRSA8		(0x2ec) +/* CR 9th frame start address for output DMA */ +#define EXYNOS_CIOCRSA9		(0x2f0) +/* CR 10th frame start address for output DMA */ +#define EXYNOS_CIOCRSA10		(0x2f4) +/* CR 11th frame start address for output DMA */ +#define EXYNOS_CIOCRSA11		(0x2f8) +/* CR 12th frame start address for output DMA */ +#define EXYNOS_CIOCRSA12		(0x2fc) +/* CR 13th frame start address for output DMA */ +#define EXYNOS_CIOCRSA13		(0x300) +/* CR 14th frame start address for output DMA */ +#define EXYNOS_CIOCRSA14		(0x304) +/* CR 15th frame start address for output DMA */ +#define EXYNOS_CIOCRSA15		(0x308) +/* CR 16th frame start address for output DMA */ +#define EXYNOS_CIOCRSA16		(0x30c) +/* CR 17th frame start address for output DMA */ +#define EXYNOS_CIOCRSA17		(0x310) +/* CR 18th frame start address for output DMA */ +#define EXYNOS_CIOCRSA18		(0x314) +/* CR 19th frame start address for output DMA */ +#define EXYNOS_CIOCRSA19		(0x318) +/* CR 20th frame start address for output DMA */ +#define EXYNOS_CIOCRSA20		(0x31c) +/* CR 21th frame start address for output DMA */ +#define EXYNOS_CIOCRSA21		(0x320) +/* CR 22th frame start address for output DMA */ +#define EXYNOS_CIOCRSA22		(0x324) +/* CR 23th frame start address for output DMA */ +#define EXYNOS_CIOCRSA23		(0x328) +/* CR 24th frame start address for output DMA */ +#define EXYNOS_CIOCRSA24		(0x32c) +/* CR 25th frame start address for output 
DMA */ +#define EXYNOS_CIOCRSA25		(0x330) +/* CR 26th frame start address for output DMA */ +#define EXYNOS_CIOCRSA26		(0x334) +/* CR 27th frame start address for output DMA */ +#define EXYNOS_CIOCRSA27		(0x338) +/* CR 28th frame start address for output DMA */ +#define EXYNOS_CIOCRSA28		(0x33c) +/* CR 29th frame start address for output DMA */ +#define EXYNOS_CIOCRSA29		(0x340) +/* CR 30th frame start address for output DMA */ +#define EXYNOS_CIOCRSA30		(0x344) +/* CR 31th frame start address for output DMA */ +#define EXYNOS_CIOCRSA31		(0x348) +/* CR 32th frame start address for output DMA */ +#define EXYNOS_CIOCRSA32		(0x34c) + +/* + * Macro part +*/ +/* frame start address 1 ~ 4, 5 ~ 32 */ +/* Number of Default PingPong Memory */ +#define DEF_PP		4 +#define EXYNOS_CIOYSA(__x)		\ +	(((__x) < DEF_PP) ?	\ +	 (EXYNOS_CIOYSA1  + (__x) * 4) : \ +	(EXYNOS_CIOYSA5  + ((__x) - DEF_PP) * 4)) +#define EXYNOS_CIOCBSA(__x)	\ +	(((__x) < DEF_PP) ?	\ +	 (EXYNOS_CIOCBSA1 + (__x) * 4) : \ +	(EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4)) +#define EXYNOS_CIOCRSA(__x)	\ +	(((__x) < DEF_PP) ?	\ +	 (EXYNOS_CIOCRSA1 + (__x) * 4) : \ +	(EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4)) +/* Number of Default PingPong Memory */ +#define DEF_IPP		1 +#define EXYNOS_CIIYSA(__x)		\ +	(((__x) < DEF_IPP) ?	\ +	 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1)) +#define EXYNOS_CIICBSA(__x)	\ +	(((__x) < DEF_IPP) ?	\ +	 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1)) +#define EXYNOS_CIICRSA(__x)	\ +	(((__x) < DEF_IPP) ?	
\ +	 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1)) + +#define EXYNOS_CISRCFMT_SOURCEHSIZE(x)		((x) << 16) +#define EXYNOS_CISRCFMT_SOURCEVSIZE(x)		((x) << 0) + +#define EXYNOS_CIWDOFST_WINHOROFST(x)		((x) << 16) +#define EXYNOS_CIWDOFST_WINVEROFST(x)		((x) << 0) + +#define EXYNOS_CIWDOFST2_WINHOROFST2(x)		((x) << 16) +#define EXYNOS_CIWDOFST2_WINVEROFST2(x)		((x) << 0) + +#define EXYNOS_CITRGFMT_TARGETHSIZE(x)		(((x) & 0x1fff) << 16) +#define EXYNOS_CITRGFMT_TARGETVSIZE(x)		(((x) & 0x1fff) << 0) + +#define EXYNOS_CISCPRERATIO_SHFACTOR(x)		((x) << 28) +#define EXYNOS_CISCPRERATIO_PREHORRATIO(x)		((x) << 16) +#define EXYNOS_CISCPRERATIO_PREVERRATIO(x)		((x) << 0) + +#define EXYNOS_CISCPREDST_PREDSTWIDTH(x)		((x) << 16) +#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x)		((x) << 0) + +#define EXYNOS_CISCCTRL_MAINHORRATIO(x)		((x) << 16) +#define EXYNOS_CISCCTRL_MAINVERRATIO(x)		((x) << 0) + +#define EXYNOS_CITAREA_TARGET_AREA(x)		((x) << 0) + +#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x)		(((x) >> 26) & 0x3) +#define EXYNOS_CISTATUS_GET_FRAME_END(x)		(((x) >> 17) & 0x1) +#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x)	(((x) >> 16) & 0x1) +#define EXYNOS_CISTATUS_GET_LCD_STATUS(x)		(((x) >> 9) & 0x1) +#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x)	(((x) >> 8) & 0x1) + +#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x)	(((x) >> 7) & 0x3f) +#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x)	((x) & 0x3f) + +#define EXYNOS_CIIMGEFF_FIN(x)			((x & 0x7) << 26) +#define EXYNOS_CIIMGEFF_PAT_CB(x)			((x) << 13) +#define EXYNOS_CIIMGEFF_PAT_CR(x)			((x) << 0) + +#define EXYNOS_CIILINESKIP(x)			(((x) & 0xf) << 24) + +#define EXYNOS_CIREAL_ISIZE_HEIGHT(x)		((x) << 16) +#define EXYNOS_CIREAL_ISIZE_WIDTH(x)		((x) << 0) + +#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x)		((x) << 24) +#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x)		((x) & 0x1) + +#define EXYNOS_CIOYOFF_VERTICAL(x)			((x) << 16) +#define EXYNOS_CIOYOFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIOCBOFF_VERTICAL(x)		((x) << 16) +#define 
EXYNOS_CIOCBOFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIOCROFF_VERTICAL(x)		((x) << 16) +#define EXYNOS_CIOCROFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIIYOFF_VERTICAL(x)			((x) << 16) +#define EXYNOS_CIIYOFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIICBOFF_VERTICAL(x)		((x) << 16) +#define EXYNOS_CIICBOFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIICROFF_VERTICAL(x)		((x) << 16) +#define EXYNOS_CIICROFF_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_ORGISIZE_VERTICAL(x)		((x) << 16) +#define EXYNOS_ORGISIZE_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_ORGOSIZE_VERTICAL(x)		((x) << 16) +#define EXYNOS_ORGOSIZE_HORIZONTAL(x)		((x) << 0) + +#define EXYNOS_CIEXTEN_TARGETH_EXT(x)		((((x) & 0x2000) >> 13) << 26) +#define EXYNOS_CIEXTEN_TARGETV_EXT(x)		((((x) & 0x2000) >> 13) << 24) +#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x)		(((x) & 0x3F) << 10) +#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x)		((x) & 0x3F) + +/* + * Bit definition part +*/ +/* Source format register */ +#define EXYNOS_CISRCFMT_ITU601_8BIT		(1 << 31) +#define EXYNOS_CISRCFMT_ITU656_8BIT		(0 << 31) +#define EXYNOS_CISRCFMT_ITU601_16BIT		(1 << 29) +#define EXYNOS_CISRCFMT_ORDER422_YCBYCR		(0 << 14) +#define EXYNOS_CISRCFMT_ORDER422_YCRYCB		(1 << 14) +#define EXYNOS_CISRCFMT_ORDER422_CBYCRY		(2 << 14) +#define EXYNOS_CISRCFMT_ORDER422_CRYCBY		(3 << 14) +/* ITU601 16bit only */ +#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR	(0 << 14) +/* ITU601 16bit only */ +#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB	(1 << 14) + +/* Window offset register */ +#define EXYNOS_CIWDOFST_WINOFSEN			(1 << 31) +#define EXYNOS_CIWDOFST_CLROVFIY			(1 << 30) +#define EXYNOS_CIWDOFST_CLROVRLB			(1 << 29) +#define EXYNOS_CIWDOFST_WINHOROFST_MASK		(0x7ff << 16) +#define EXYNOS_CIWDOFST_CLROVFICB			(1 << 15) +#define EXYNOS_CIWDOFST_CLROVFICR			(1 << 14) +#define EXYNOS_CIWDOFST_WINVEROFST_MASK		(0xfff << 0) + +/* Global control register */ +#define EXYNOS_CIGCTRL_SWRST			(1 << 31) +#define EXYNOS_CIGCTRL_CAMRST_A			(1 << 30) 
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B		(0 << 29) +#define EXYNOS_CIGCTRL_SELCAM_ITU_A		(1 << 29) +#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK		(1 << 29) +#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL		(0 << 27) +#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR	(1 << 27) +#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC		(2 << 27) +#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC		(3 << 27) +#define EXYNOS_CIGCTRL_TESTPATTERN_MASK		(3 << 27) +#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT		(27) +#define EXYNOS_CIGCTRL_INVPOLPCLK			(1 << 26) +#define EXYNOS_CIGCTRL_INVPOLVSYNC			(1 << 25) +#define EXYNOS_CIGCTRL_INVPOLHREF			(1 << 24) +#define EXYNOS_CIGCTRL_IRQ_OVFEN			(1 << 22) +#define EXYNOS_CIGCTRL_HREF_MASK			(1 << 21) +#define EXYNOS_CIGCTRL_IRQ_EDGE			(0 << 20) +#define EXYNOS_CIGCTRL_IRQ_LEVEL			(1 << 20) +#define EXYNOS_CIGCTRL_IRQ_CLR			(1 << 19) +#define EXYNOS_CIGCTRL_IRQ_END_DISABLE		(1 << 18) +#define EXYNOS_CIGCTRL_IRQ_DISABLE			(0 << 16) +#define EXYNOS_CIGCTRL_IRQ_ENABLE			(1 << 16) +#define EXYNOS_CIGCTRL_SHADOW_DISABLE		(1 << 12) +#define EXYNOS_CIGCTRL_CAM_JPEG			(1 << 8) +#define EXYNOS_CIGCTRL_SELCAM_MIPI_B		(0 << 7) +#define EXYNOS_CIGCTRL_SELCAM_MIPI_A		(1 << 7) +#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK		(1 << 7) +#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA	(0 << 6) +#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK	(1 << 6) +#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK		(1 << 10) +#define EXYNOS_CIGCTRL_SELWRITEBACK_A		(1 << 10) +#define EXYNOS_CIGCTRL_SELWRITEBACK_B		(0 << 10) +#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK		(1 << 6) +#define EXYNOS_CIGCTRL_CSC_ITU601			(0 << 5) +#define EXYNOS_CIGCTRL_CSC_ITU709			(1 << 5) +#define EXYNOS_CIGCTRL_CSC_MASK			(1 << 5) +#define EXYNOS_CIGCTRL_INVPOLHSYNC			(1 << 4) +#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU		(0 << 3) +#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI		(1 << 3) +#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK		(1 << 3) +#define EXYNOS_CIGCTRL_PROGRESSIVE			(0 << 0) +#define EXYNOS_CIGCTRL_INTERLACE			(1 << 0) + +/* Window offset2 register */ 
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK		(0xfff << 16) +#define EXYNOS_CIWDOFST_WINVEROFST2_MASK		(0xfff << 16) + +/* Target format register */ +#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE		(1 << 31) +#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420		(0 << 29) +#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422		(1 << 29) +#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE	(2 << 29) +#define EXYNOS_CITRGFMT_OUTFORMAT_RGB		(3 << 29) +#define EXYNOS_CITRGFMT_OUTFORMAT_MASK		(3 << 29) +#define EXYNOS_CITRGFMT_FLIP_SHIFT			(14) +#define EXYNOS_CITRGFMT_FLIP_NORMAL		(0 << 14) +#define EXYNOS_CITRGFMT_FLIP_X_MIRROR		(1 << 14) +#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR		(2 << 14) +#define EXYNOS_CITRGFMT_FLIP_180			(3 << 14) +#define EXYNOS_CITRGFMT_FLIP_MASK			(3 << 14) +#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE		(1 << 13) +#define EXYNOS_CITRGFMT_TARGETV_MASK		(0x1fff << 0) +#define EXYNOS_CITRGFMT_TARGETH_MASK		(0x1fff << 16) + +/* Output DMA control register */ +#define EXYNOS_CIOCTRL_WEAVE_OUT			(1 << 31) +#define EXYNOS_CIOCTRL_WEAVE_MASK			(1 << 31) +#define EXYNOS_CIOCTRL_LASTENDEN			(1 << 30) +#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR		(0 << 24) +#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB		(1 << 24) +#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB		(2 << 24) +#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR		(3 << 24) +#define EXYNOS_CIOCTRL_ORDER2P_SHIFT		(24) +#define EXYNOS_CIOCTRL_ORDER2P_MASK		(3 << 24) +#define EXYNOS_CIOCTRL_YCBCR_3PLANE		(0 << 3) +#define EXYNOS_CIOCTRL_YCBCR_2PLANE		(1 << 3) +#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK		(1 << 3) +#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE		(1 << 2) +#define EXYNOS_CIOCTRL_ALPHA_OUT			(0xff << 4) +#define EXYNOS_CIOCTRL_ORDER422_YCBYCR		(0 << 0) +#define EXYNOS_CIOCTRL_ORDER422_YCRYCB		(1 << 0) +#define EXYNOS_CIOCTRL_ORDER422_CBYCRY		(2 << 0) +#define EXYNOS_CIOCTRL_ORDER422_CRYCBY		(3 << 0) +#define EXYNOS_CIOCTRL_ORDER422_MASK		(3 << 0) + +/* Main scaler control register */ +#define EXYNOS_CISCCTRL_SCALERBYPASS		(1 << 31) +#define 
EXYNOS_CISCCTRL_SCALEUP_H			(1 << 30) +#define EXYNOS_CISCCTRL_SCALEUP_V			(1 << 29) +#define EXYNOS_CISCCTRL_CSCR2Y_NARROW		(0 << 28) +#define EXYNOS_CISCCTRL_CSCR2Y_WIDE		(1 << 28) +#define EXYNOS_CISCCTRL_CSCY2R_NARROW		(0 << 27) +#define EXYNOS_CISCCTRL_CSCY2R_WIDE		(1 << 27) +#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO		(1 << 26) +#define EXYNOS_CISCCTRL_PROGRESSIVE		(0 << 25) +#define EXYNOS_CISCCTRL_INTERLACE			(1 << 25) +#define EXYNOS_CISCCTRL_SCAN_MASK			(1 << 25) +#define EXYNOS_CISCCTRL_SCALERSTART		(1 << 15) +#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565		(0 << 13) +#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666		(1 << 13) +#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888		(2 << 13) +#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK		(3 << 13) +#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565		(0 << 11) +#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666		(1 << 11) +#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888		(2 << 11) +#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK	(3 << 11) +#define EXYNOS_CISCCTRL_EXTRGB_NORMAL		(0 << 10) +#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION		(1 << 10) +#define EXYNOS_CISCCTRL_ONE2ONE			(1 << 9) +#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK		(0x1ff << 0) +#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK		(0x1ff << 16) + +/* Status register */ +#define EXYNOS_CISTATUS_OVFIY			(1 << 31) +#define EXYNOS_CISTATUS_OVFICB			(1 << 30) +#define EXYNOS_CISTATUS_OVFICR			(1 << 29) +#define EXYNOS_CISTATUS_VSYNC			(1 << 28) +#define EXYNOS_CISTATUS_SCALERSTART		(1 << 26) +#define EXYNOS_CISTATUS_WINOFSTEN			(1 << 25) +#define EXYNOS_CISTATUS_IMGCPTEN			(1 << 22) +#define EXYNOS_CISTATUS_IMGCPTENSC			(1 << 21) +#define EXYNOS_CISTATUS_VSYNC_A			(1 << 20) +#define EXYNOS_CISTATUS_VSYNC_B			(1 << 19) +#define EXYNOS_CISTATUS_OVRLB			(1 << 18) +#define EXYNOS_CISTATUS_FRAMEEND			(1 << 17) +#define EXYNOS_CISTATUS_LASTCAPTUREEND		(1 << 16) +#define EXYNOS_CISTATUS_VVALID_A			(1 << 15) +#define EXYNOS_CISTATUS_VVALID_B			(1 << 14) + +/* Image capture enable register */ +#define EXYNOS_CIIMGCPT_IMGCPTEN			
(1 << 31) +#define EXYNOS_CIIMGCPT_IMGCPTEN_SC		(1 << 30) +#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE		(1 << 25) +#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN		(0 << 18) +#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT		(1 << 18) + +/* Image effects register */ +#define EXYNOS_CIIMGEFF_IE_DISABLE			(0 << 30) +#define EXYNOS_CIIMGEFF_IE_ENABLE			(1 << 30) +#define EXYNOS_CIIMGEFF_IE_SC_BEFORE		(0 << 29) +#define EXYNOS_CIIMGEFF_IE_SC_AFTER		(1 << 29) +#define EXYNOS_CIIMGEFF_FIN_BYPASS			(0 << 26) +#define EXYNOS_CIIMGEFF_FIN_ARBITRARY		(1 << 26) +#define EXYNOS_CIIMGEFF_FIN_NEGATIVE		(2 << 26) +#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE		(3 << 26) +#define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26) +#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26) +#define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26) +#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff < 13) | (0xff < 0)) + +/* Real input DMA size register */ +#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31) +#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE	(1 << 30) +#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK		(0x3FFF << 16) +#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK		(0x3FFF << 0) + +/* Input DMA control register */ +#define EXYNOS_MSCTRL_FIELD_MASK			(1 << 31) +#define EXYNOS_MSCTRL_FIELD_WEAVE			(1 << 31) +#define EXYNOS_MSCTRL_FIELD_NORMAL			(0 << 31) +#define EXYNOS_MSCTRL_BURST_CNT			(24) +#define EXYNOS_MSCTRL_BURST_CNT_MASK		(0xf << 24) +#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR		(0 << 16) +#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB		(1 << 16) +#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB		(2 << 16) +#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR		(3 << 16) +#define EXYNOS_MSCTRL_ORDER2P_SHIFT		(16) +#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK		(0x3 << 16) +#define EXYNOS_MSCTRL_C_INT_IN_3PLANE		(0 << 15) +#define EXYNOS_MSCTRL_C_INT_IN_2PLANE		(1 << 15) +#define EXYNOS_MSCTRL_FLIP_SHIFT			(13) +#define EXYNOS_MSCTRL_FLIP_NORMAL			(0 << 13) +#define EXYNOS_MSCTRL_FLIP_X_MIRROR		(1 << 13) +#define EXYNOS_MSCTRL_FLIP_Y_MIRROR		(2 << 13) +#define EXYNOS_MSCTRL_FLIP_180			(3 << 
13) +#define EXYNOS_MSCTRL_FLIP_MASK			(3 << 13) +#define EXYNOS_MSCTRL_ORDER422_CRYCBY		(0 << 4) +#define EXYNOS_MSCTRL_ORDER422_YCRYCB		(1 << 4) +#define EXYNOS_MSCTRL_ORDER422_CBYCRY		(2 << 4) +#define EXYNOS_MSCTRL_ORDER422_YCBYCR		(3 << 4) +#define EXYNOS_MSCTRL_INPUT_EXTCAM			(0 << 3) +#define EXYNOS_MSCTRL_INPUT_MEMORY			(1 << 3) +#define EXYNOS_MSCTRL_INPUT_MASK			(1 << 3) +#define EXYNOS_MSCTRL_INFORMAT_YCBCR420		(0 << 1) +#define EXYNOS_MSCTRL_INFORMAT_YCBCR422		(1 << 1) +#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE	(2 << 1) +#define EXYNOS_MSCTRL_INFORMAT_RGB			(3 << 1) +#define EXYNOS_MSCTRL_ENVID			(1 << 0) + +/* DMA parameter register */ +#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR		(0 << 29) +#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE		(1 << 29) +#define EXYNOS_CIDMAPARAM_R_MODE_16X16		(2 << 29) +#define EXYNOS_CIDMAPARAM_R_MODE_64X32		(3 << 29) +#define EXYNOS_CIDMAPARAM_R_MODE_MASK		(3 << 29) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64		(0 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128		(1 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256		(2 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512		(3 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024	(4 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048	(5 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096	(6 << 24) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1		(0 << 20) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2		(1 << 20) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4		(2 << 20) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8		(3 << 20) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16		(4 << 20) +#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32		(5 << 20) +#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR		(0 << 13) +#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE		(1 << 13) +#define EXYNOS_CIDMAPARAM_W_MODE_16X16		(2 << 13) +#define EXYNOS_CIDMAPARAM_W_MODE_64X32		(3 << 13) +#define EXYNOS_CIDMAPARAM_W_MODE_MASK		(3 << 13) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64		(0 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128		(1 << 8) +#define 
EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256		(2 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512		(3 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024	(4 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048	(5 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096	(6 << 8) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1		(0 << 4) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2		(1 << 4) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4		(2 << 4) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8		(3 << 4) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16		(4 << 4) +#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32		(5 << 4) + +/* Gathering Extension register */ +#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK		(1 << 26) +#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK		(1 << 24) +#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK	(0x3F << 10) +#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK	(0x3F) +#define EXYNOS_CIEXTEN_YUV444_OUT			(1 << 22) + +/* FIMC Clock Source Select register */ +#define EXYNOS_CLKSRC_HCLK				(0 << 1) +#define EXYNOS_CLKSRC_HCLK_MASK			(1 << 1) +#define EXYNOS_CLKSRC_SCLK				(1 << 1) + +/* SYSREG for FIMC writeback */ +#define SYSREG_CAMERA_BLK			(0x0218) +#define SYSREG_FIMD0WB_DEST_MASK		(0x3 << 23) +#define SYSREG_FIMD0WB_DEST_SHIFT		23 + +#endif /* EXYNOS_REGS_FIMC_H */ diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h new file mode 100644 index 00000000000..9ad592707aa --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-gsc.h @@ -0,0 +1,284 @@ +/* linux/drivers/gpu/drm/exynos/regs-gsc.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + *		http://www.samsung.com + * + * Register definition file for Samsung G-Scaler driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef EXYNOS_REGS_GSC_H_ +#define EXYNOS_REGS_GSC_H_ + +/* G-Scaler enable */ +#define GSC_ENABLE			0x00 +#define GSC_ENABLE_PP_UPDATE_TIME_MASK	(1 << 9) +#define GSC_ENABLE_PP_UPDATE_TIME_CURR	(0 << 9) +#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS	(1 << 9) +#define GSC_ENABLE_CLK_GATE_MODE_MASK	(1 << 8) +#define GSC_ENABLE_CLK_GATE_MODE_FREE	(1 << 8) +#define GSC_ENABLE_IPC_MODE_MASK	(1 << 7) +#define GSC_ENABLE_NORM_MODE		(0 << 7) +#define GSC_ENABLE_IPC_MODE		(1 << 7) +#define GSC_ENABLE_PP_UPDATE_MODE_MASK	(1 << 6) +#define GSC_ENABLE_PP_UPDATE_FIRE_MODE	(1 << 6) +#define GSC_ENABLE_IN_PP_UPDATE		(1 << 5) +#define GSC_ENABLE_ON_CLEAR_MASK	(1 << 4) +#define GSC_ENABLE_ON_CLEAR_ONESHOT	(1 << 4) +#define GSC_ENABLE_QOS_ENABLE		(1 << 3) +#define GSC_ENABLE_OP_STATUS		(1 << 2) +#define GSC_ENABLE_SFR_UPDATE		(1 << 1) +#define GSC_ENABLE_ON			(1 << 0) + +/* G-Scaler S/W reset */ +#define GSC_SW_RESET			0x04 +#define GSC_SW_RESET_SRESET		(1 << 0) + +/* G-Scaler IRQ */ +#define GSC_IRQ				0x08 +#define GSC_IRQ_STATUS_OR_IRQ		(1 << 17) +#define GSC_IRQ_STATUS_OR_FRM_DONE	(1 << 16) +#define GSC_IRQ_OR_MASK			(1 << 2) +#define GSC_IRQ_FRMDONE_MASK		(1 << 1) +#define GSC_IRQ_ENABLE			(1 << 0) + +/* G-Scaler input control */ +#define GSC_IN_CON			0x10 +#define GSC_IN_CHROM_STRIDE_SEL_MASK	(1 << 20) +#define GSC_IN_CHROM_STRIDE_SEPAR	(1 << 20) +#define GSC_IN_RB_SWAP_MASK		(1 << 19) +#define GSC_IN_RB_SWAP			(1 << 19) +#define GSC_IN_ROT_MASK			(7 << 16) +#define GSC_IN_ROT_270			(7 << 16) +#define GSC_IN_ROT_90_YFLIP		(6 << 16) +#define GSC_IN_ROT_90_XFLIP		(5 << 16) +#define GSC_IN_ROT_90			(4 << 16) +#define GSC_IN_ROT_180			(3 << 16) +#define GSC_IN_ROT_YFLIP		(2 << 16) +#define GSC_IN_ROT_XFLIP		(1 << 16) +#define GSC_IN_RGB_TYPE_MASK		(3 << 14) +#define GSC_IN_RGB_HD_WIDE		(3 << 14) +#define GSC_IN_RGB_HD_NARROW		(2 << 14) +#define GSC_IN_RGB_SD_WIDE		(1 << 14) +#define GSC_IN_RGB_SD_NARROW		(0 << 14) +#define GSC_IN_YUV422_1P_ORDER_MASK	(1 << 13) +#define 
GSC_IN_YUV422_1P_ORDER_LSB_Y	(0 << 13) +#define GSC_IN_YUV422_1P_OEDER_LSB_C	(1 << 13) +#define GSC_IN_CHROMA_ORDER_MASK	(1 << 12) +#define GSC_IN_CHROMA_ORDER_CBCR	(0 << 12) +#define GSC_IN_CHROMA_ORDER_CRCB	(1 << 12) +#define GSC_IN_FORMAT_MASK		(7 << 8) +#define GSC_IN_XRGB8888			(0 << 8) +#define GSC_IN_RGB565			(1 << 8) +#define GSC_IN_YUV420_2P		(2 << 8) +#define GSC_IN_YUV420_3P		(3 << 8) +#define GSC_IN_YUV422_1P		(4 << 8) +#define GSC_IN_YUV422_2P		(5 << 8) +#define GSC_IN_YUV422_3P		(6 << 8) +#define GSC_IN_TILE_TYPE_MASK		(1 << 4) +#define GSC_IN_TILE_C_16x8		(0 << 4) +#define GSC_IN_TILE_C_16x16		(1 << 4) +#define GSC_IN_TILE_MODE		(1 << 3) +#define GSC_IN_LOCAL_SEL_MASK		(3 << 1) +#define GSC_IN_LOCAL_CAM3		(3 << 1) +#define GSC_IN_LOCAL_FIMD_WB		(2 << 1) +#define GSC_IN_LOCAL_CAM1		(1 << 1) +#define GSC_IN_LOCAL_CAM0		(0 << 1) +#define GSC_IN_PATH_MASK		(1 << 0) +#define GSC_IN_PATH_LOCAL		(1 << 0) +#define GSC_IN_PATH_MEMORY		(0 << 0) + +/* G-Scaler source image size */ +#define GSC_SRCIMG_SIZE			0x14 +#define GSC_SRCIMG_HEIGHT_MASK		(0x1fff << 16) +#define GSC_SRCIMG_HEIGHT(x)		((x) << 16) +#define GSC_SRCIMG_WIDTH_MASK		(0x3fff << 0) +#define GSC_SRCIMG_WIDTH(x)		((x) << 0) + +/* G-Scaler source image offset */ +#define GSC_SRCIMG_OFFSET		0x18 +#define GSC_SRCIMG_OFFSET_Y_MASK	(0x1fff << 16) +#define GSC_SRCIMG_OFFSET_Y(x)		((x) << 16) +#define GSC_SRCIMG_OFFSET_X_MASK	(0x1fff << 0) +#define GSC_SRCIMG_OFFSET_X(x)		((x) << 0) + +/* G-Scaler cropped source image size */ +#define GSC_CROPPED_SIZE		0x1C +#define GSC_CROPPED_HEIGHT_MASK		(0x1fff << 16) +#define GSC_CROPPED_HEIGHT(x)		((x) << 16) +#define GSC_CROPPED_WIDTH_MASK		(0x1fff << 0) +#define GSC_CROPPED_WIDTH(x)		((x) << 0) + +/* G-Scaler output control */ +#define GSC_OUT_CON			0x20 +#define GSC_OUT_GLOBAL_ALPHA_MASK	(0xff << 24) +#define GSC_OUT_GLOBAL_ALPHA(x)		((x) << 24) +#define GSC_OUT_CHROM_STRIDE_SEL_MASK	(1 << 13) +#define GSC_OUT_CHROM_STRIDE_SEPAR	(1 << 13) +#define 
GSC_OUT_RB_SWAP_MASK		(1 << 12) +#define GSC_OUT_RB_SWAP			(1 << 12) +#define GSC_OUT_RGB_TYPE_MASK		(3 << 10) +#define GSC_OUT_RGB_HD_NARROW		(3 << 10) +#define GSC_OUT_RGB_HD_WIDE		(2 << 10) +#define GSC_OUT_RGB_SD_NARROW		(1 << 10) +#define GSC_OUT_RGB_SD_WIDE		(0 << 10) +#define GSC_OUT_YUV422_1P_ORDER_MASK	(1 << 9) +#define GSC_OUT_YUV422_1P_ORDER_LSB_Y	(0 << 9) +#define GSC_OUT_YUV422_1P_OEDER_LSB_C	(1 << 9) +#define GSC_OUT_CHROMA_ORDER_MASK	(1 << 8) +#define GSC_OUT_CHROMA_ORDER_CBCR	(0 << 8) +#define GSC_OUT_CHROMA_ORDER_CRCB	(1 << 8) +#define GSC_OUT_FORMAT_MASK		(7 << 4) +#define GSC_OUT_XRGB8888		(0 << 4) +#define GSC_OUT_RGB565			(1 << 4) +#define GSC_OUT_YUV420_2P		(2 << 4) +#define GSC_OUT_YUV420_3P		(3 << 4) +#define GSC_OUT_YUV422_1P		(4 << 4) +#define GSC_OUT_YUV422_2P		(5 << 4) +#define GSC_OUT_YUV444			(7 << 4) +#define GSC_OUT_TILE_TYPE_MASK		(1 << 2) +#define GSC_OUT_TILE_C_16x8		(0 << 2) +#define GSC_OUT_TILE_C_16x16		(1 << 2) +#define GSC_OUT_TILE_MODE		(1 << 1) +#define GSC_OUT_PATH_MASK		(1 << 0) +#define GSC_OUT_PATH_LOCAL		(1 << 0) +#define GSC_OUT_PATH_MEMORY		(0 << 0) + +/* G-Scaler scaled destination image size */ +#define GSC_SCALED_SIZE			0x24 +#define GSC_SCALED_HEIGHT_MASK		(0x1fff << 16) +#define GSC_SCALED_HEIGHT(x)		((x) << 16) +#define GSC_SCALED_WIDTH_MASK		(0x1fff << 0) +#define GSC_SCALED_WIDTH(x)		((x) << 0) + +/* G-Scaler pre scale ratio */ +#define GSC_PRE_SCALE_RATIO		0x28 +#define GSC_PRESC_SHFACTOR_MASK		(7 << 28) +#define GSC_PRESC_SHFACTOR(x)		((x) << 28) +#define GSC_PRESC_V_RATIO_MASK		(7 << 16) +#define GSC_PRESC_V_RATIO(x)		((x) << 16) +#define GSC_PRESC_H_RATIO_MASK		(7 << 0) +#define GSC_PRESC_H_RATIO(x)		((x) << 0) + +/* G-Scaler main scale horizontal ratio */ +#define GSC_MAIN_H_RATIO		0x2C +#define GSC_MAIN_H_RATIO_MASK		(0xfffff << 0) +#define GSC_MAIN_H_RATIO_VALUE(x)	((x) << 0) + +/* G-Scaler main scale vertical ratio */ +#define GSC_MAIN_V_RATIO		0x30 +#define GSC_MAIN_V_RATIO_MASK		(0xfffff << 0) 
+#define GSC_MAIN_V_RATIO_VALUE(x)	((x) << 0) + +/* G-Scaler input chrominance stride */ +#define GSC_IN_CHROM_STRIDE		0x3C +#define GSC_IN_CHROM_STRIDE_MASK	(0x3fff << 0) +#define GSC_IN_CHROM_STRIDE_VALUE(x)	((x) << 0) + +/* G-Scaler destination image size */ +#define GSC_DSTIMG_SIZE			0x40 +#define GSC_DSTIMG_HEIGHT_MASK		(0x1fff << 16) +#define GSC_DSTIMG_HEIGHT(x)		((x) << 16) +#define GSC_DSTIMG_WIDTH_MASK		(0x1fff << 0) +#define GSC_DSTIMG_WIDTH(x)		((x) << 0) + +/* G-Scaler destination image offset */ +#define GSC_DSTIMG_OFFSET		0x44 +#define GSC_DSTIMG_OFFSET_Y_MASK	(0x1fff << 16) +#define GSC_DSTIMG_OFFSET_Y(x)		((x) << 16) +#define GSC_DSTIMG_OFFSET_X_MASK	(0x1fff << 0) +#define GSC_DSTIMG_OFFSET_X(x)		((x) << 0) + +/* G-Scaler output chrominance stride */ +#define GSC_OUT_CHROM_STRIDE		0x48 +#define GSC_OUT_CHROM_STRIDE_MASK	(0x3fff << 0) +#define GSC_OUT_CHROM_STRIDE_VALUE(x)	((x) << 0) + +/* G-Scaler input y address mask */ +#define GSC_IN_BASE_ADDR_Y_MASK		0x4C +/* G-Scaler input y base address */ +#define GSC_IN_BASE_ADDR_Y(n)		(0x50 + (n) * 0x4) +/* G-Scaler input y base current address */ +#define GSC_IN_BASE_ADDR_Y_CUR(n)	(0x60 + (n) * 0x4) + +/* G-Scaler input cb address mask */ +#define GSC_IN_BASE_ADDR_CB_MASK	0x7C +/* G-Scaler input cb base address */ +#define GSC_IN_BASE_ADDR_CB(n)		(0x80 + (n) * 0x4) +/* G-Scaler input cb base current address */ +#define GSC_IN_BASE_ADDR_CB_CUR(n)	(0x90 + (n) * 0x4) + +/* G-Scaler input cr address mask */ +#define GSC_IN_BASE_ADDR_CR_MASK	0xAC +/* G-Scaler input cr base address */ +#define GSC_IN_BASE_ADDR_CR(n)		(0xB0 + (n) * 0x4) +/* G-Scaler input cr base current address */ +#define GSC_IN_BASE_ADDR_CR_CUR(n)	(0xC0 + (n) * 0x4) + +/* G-Scaler input address mask */ +#define GSC_IN_CURR_ADDR_INDEX	(0xf << 24) +#define GSC_IN_CURR_GET_INDEX(x)	((x) >> 24) +#define GSC_IN_BASE_ADDR_PINGPONG(x)	((x) << 16) +#define GSC_IN_BASE_ADDR_MASK		(0xff << 0) + +/* G-Scaler output y address mask */ +#define 
GSC_OUT_BASE_ADDR_Y_MASK	0x10C +/* G-Scaler output y base address */ +#define GSC_OUT_BASE_ADDR_Y(n)		(0x110 + (n) * 0x4) + +/* G-Scaler output cb address mask */ +#define GSC_OUT_BASE_ADDR_CB_MASK	0x15C +/* G-Scaler output cb base address */ +#define GSC_OUT_BASE_ADDR_CB(n)		(0x160 + (n) * 0x4) + +/* G-Scaler output cr address mask */ +#define GSC_OUT_BASE_ADDR_CR_MASK	0x1AC +/* G-Scaler output cr base address */ +#define GSC_OUT_BASE_ADDR_CR(n)		(0x1B0 + (n) * 0x4) + +/* G-Scaler output address mask */ +#define GSC_OUT_CURR_ADDR_INDEX		(0xf << 24) +#define GSC_OUT_CURR_GET_INDEX(x)	((x) >> 24) +#define GSC_OUT_BASE_ADDR_PINGPONG(x)	((x) << 16) +#define GSC_OUT_BASE_ADDR_MASK		(0xffff << 0) + +/* G-Scaler horizontal scaling filter */ +#define GSC_HCOEF(n, s, x)	(0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300) + +/* G-Scaler vertical scaling filter */ +#define GSC_VCOEF(n, s, x)	(0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300) + +/* G-Scaler BUS control */ +#define GSC_BUSCON			0xA78 +#define GSC_BUSCON_INT_TIME_MASK	(1 << 8) +#define GSC_BUSCON_INT_DATA_TRANS	(0 << 8) +#define GSC_BUSCON_INT_AXI_RESPONSE	(1 << 8) +#define GSC_BUSCON_AWCACHE(x)		((x) << 4) +#define GSC_BUSCON_ARCACHE(x)		((x) << 0) + +/* G-Scaler V position */ +#define GSC_VPOSITION			0xA7C +#define GSC_VPOS_F(x)			((x) << 0) + + +/* G-Scaler clock initial count */ +#define GSC_CLK_INIT_COUNT		0xC00 +#define GSC_CLK_GATE_MODE_INIT_CNT(x)	((x) << 0) + +/* G-Scaler clock snoop count */ +#define GSC_CLK_SNOOP_COUNT		0xC04 +#define GSC_CLK_GATE_MODE_SNOOP_CNT(x)	((x) << 0) + +/* SYSCON. 
GSCBLK_CFG */ +#define SYSREG_GSCBLK_CFG1		(S3C_VA_SYS + 0x0224) +#define GSC_BLK_DISP1WB_DEST(x)		(x << 10) +#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + x)) +#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + x)) +#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * x)) +#define SYSREG_GSCBLK_CFG2		(S3C_VA_SYS + 0x2000) +#define PXLASYNC_LO_MASK_CAMIF_GSCL(x)	(1 << (x)) + +#endif /* EXYNOS_REGS_GSC_H_ */ diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h new file mode 100644 index 00000000000..3f35ac6d8a4 --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-hdmi.h @@ -0,0 +1,597 @@ +/* + * + *  Cloned from drivers/media/video/s5p-tv/regs-hdmi.h + * + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * HDMI register header file for Samsung TVOUT driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef SAMSUNG_REGS_HDMI_H +#define SAMSUNG_REGS_HDMI_H + +/* + * Register part +*/ + +/* HDMI Version 1.3 & Common */ +#define HDMI_CTRL_BASE(x)		((x) + 0x00000000) +#define HDMI_CORE_BASE(x)		((x) + 0x00010000) +#define HDMI_I2S_BASE(x)		((x) + 0x00040000) +#define HDMI_TG_BASE(x)			((x) + 0x00050000) + +/* Control registers */ +#define HDMI_INTC_CON			HDMI_CTRL_BASE(0x0000) +#define HDMI_INTC_FLAG			HDMI_CTRL_BASE(0x0004) +#define HDMI_HPD_STATUS			HDMI_CTRL_BASE(0x000C) +#define HDMI_V13_PHY_RSTOUT		HDMI_CTRL_BASE(0x0014) +#define HDMI_V13_PHY_VPLL		HDMI_CTRL_BASE(0x0018) +#define HDMI_V13_PHY_CMU		HDMI_CTRL_BASE(0x001C) +#define HDMI_V13_CORE_RSTOUT		HDMI_CTRL_BASE(0x0020) + +/* Core registers */ +#define HDMI_CON_0			HDMI_CORE_BASE(0x0000) +#define HDMI_CON_1			HDMI_CORE_BASE(0x0004) +#define HDMI_CON_2			HDMI_CORE_BASE(0x0008) +#define HDMI_SYS_STATUS			HDMI_CORE_BASE(0x0010) +#define HDMI_V13_PHY_STATUS		HDMI_CORE_BASE(0x0014) +#define HDMI_STATUS_EN			HDMI_CORE_BASE(0x0020) +#define HDMI_HPD			HDMI_CORE_BASE(0x0030) +#define HDMI_MODE_SEL			HDMI_CORE_BASE(0x0040) +#define HDMI_ENC_EN			HDMI_CORE_BASE(0x0044) +#define HDMI_V13_BLUE_SCREEN_0		HDMI_CORE_BASE(0x0050) +#define HDMI_V13_BLUE_SCREEN_1		HDMI_CORE_BASE(0x0054) +#define HDMI_V13_BLUE_SCREEN_2		HDMI_CORE_BASE(0x0058) +#define HDMI_H_BLANK_0			HDMI_CORE_BASE(0x00A0) +#define HDMI_H_BLANK_1			HDMI_CORE_BASE(0x00A4) +#define HDMI_V13_V_BLANK_0		HDMI_CORE_BASE(0x00B0) +#define HDMI_V13_V_BLANK_1		HDMI_CORE_BASE(0x00B4) +#define HDMI_V13_V_BLANK_2		HDMI_CORE_BASE(0x00B8) +#define HDMI_V13_H_V_LINE_0		HDMI_CORE_BASE(0x00C0) +#define HDMI_V13_H_V_LINE_1		HDMI_CORE_BASE(0x00C4) +#define HDMI_V13_H_V_LINE_2		HDMI_CORE_BASE(0x00C8) +#define HDMI_VSYNC_POL			HDMI_CORE_BASE(0x00E4) +#define HDMI_INT_PRO_MODE		HDMI_CORE_BASE(0x00E8) +#define HDMI_V13_V_BLANK_F_0		HDMI_CORE_BASE(0x0110) +#define HDMI_V13_V_BLANK_F_1		HDMI_CORE_BASE(0x0114) +#define HDMI_V13_V_BLANK_F_2		HDMI_CORE_BASE(0x0118) +#define 
HDMI_V13_H_SYNC_GEN_0		HDMI_CORE_BASE(0x0120) +#define HDMI_V13_H_SYNC_GEN_1		HDMI_CORE_BASE(0x0124) +#define HDMI_V13_H_SYNC_GEN_2		HDMI_CORE_BASE(0x0128) +#define HDMI_V13_V_SYNC_GEN_1_0		HDMI_CORE_BASE(0x0130) +#define HDMI_V13_V_SYNC_GEN_1_1		HDMI_CORE_BASE(0x0134) +#define HDMI_V13_V_SYNC_GEN_1_2		HDMI_CORE_BASE(0x0138) +#define HDMI_V13_V_SYNC_GEN_2_0		HDMI_CORE_BASE(0x0140) +#define HDMI_V13_V_SYNC_GEN_2_1		HDMI_CORE_BASE(0x0144) +#define HDMI_V13_V_SYNC_GEN_2_2		HDMI_CORE_BASE(0x0148) +#define HDMI_V13_V_SYNC_GEN_3_0		HDMI_CORE_BASE(0x0150) +#define HDMI_V13_V_SYNC_GEN_3_1		HDMI_CORE_BASE(0x0154) +#define HDMI_V13_V_SYNC_GEN_3_2		HDMI_CORE_BASE(0x0158) +#define HDMI_V13_ACR_CON		HDMI_CORE_BASE(0x0180) +#define HDMI_V13_AVI_CON		HDMI_CORE_BASE(0x0300) +#define HDMI_V13_AVI_BYTE(n)		HDMI_CORE_BASE(0x0320 + 4 * (n)) +#define HDMI_V13_DC_CONTROL		HDMI_CORE_BASE(0x05C0) +#define HDMI_V13_VIDEO_PATTERN_GEN	HDMI_CORE_BASE(0x05C4) +#define HDMI_V13_HPD_GEN		HDMI_CORE_BASE(0x05C8) +#define HDMI_V13_AUI_CON		HDMI_CORE_BASE(0x0360) +#define HDMI_V13_SPD_CON		HDMI_CORE_BASE(0x0400) + +/* Timing generator registers */ +#define HDMI_TG_CMD			HDMI_TG_BASE(0x0000) +#define HDMI_TG_H_FSZ_L			HDMI_TG_BASE(0x0018) +#define HDMI_TG_H_FSZ_H			HDMI_TG_BASE(0x001C) +#define HDMI_TG_HACT_ST_L		HDMI_TG_BASE(0x0020) +#define HDMI_TG_HACT_ST_H		HDMI_TG_BASE(0x0024) +#define HDMI_TG_HACT_SZ_L		HDMI_TG_BASE(0x0028) +#define HDMI_TG_HACT_SZ_H		HDMI_TG_BASE(0x002C) +#define HDMI_TG_V_FSZ_L			HDMI_TG_BASE(0x0030) +#define HDMI_TG_V_FSZ_H			HDMI_TG_BASE(0x0034) +#define HDMI_TG_VSYNC_L			HDMI_TG_BASE(0x0038) +#define HDMI_TG_VSYNC_H			HDMI_TG_BASE(0x003C) +#define HDMI_TG_VSYNC2_L		HDMI_TG_BASE(0x0040) +#define HDMI_TG_VSYNC2_H		HDMI_TG_BASE(0x0044) +#define HDMI_TG_VACT_ST_L		HDMI_TG_BASE(0x0048) +#define HDMI_TG_VACT_ST_H		HDMI_TG_BASE(0x004C) +#define HDMI_TG_VACT_SZ_L		HDMI_TG_BASE(0x0050) +#define HDMI_TG_VACT_SZ_H		HDMI_TG_BASE(0x0054) +#define HDMI_TG_FIELD_CHG_L		
HDMI_TG_BASE(0x0058) +#define HDMI_TG_FIELD_CHG_H		HDMI_TG_BASE(0x005C) +#define HDMI_TG_VACT_ST2_L		HDMI_TG_BASE(0x0060) +#define HDMI_TG_VACT_ST2_H		HDMI_TG_BASE(0x0064) +#define HDMI_TG_VSYNC_TOP_HDMI_L	HDMI_TG_BASE(0x0078) +#define HDMI_TG_VSYNC_TOP_HDMI_H	HDMI_TG_BASE(0x007C) +#define HDMI_TG_VSYNC_BOT_HDMI_L	HDMI_TG_BASE(0x0080) +#define HDMI_TG_VSYNC_BOT_HDMI_H	HDMI_TG_BASE(0x0084) +#define HDMI_TG_FIELD_TOP_HDMI_L	HDMI_TG_BASE(0x0088) +#define HDMI_TG_FIELD_TOP_HDMI_H	HDMI_TG_BASE(0x008C) +#define HDMI_TG_FIELD_BOT_HDMI_L	HDMI_TG_BASE(0x0090) +#define HDMI_TG_FIELD_BOT_HDMI_H	HDMI_TG_BASE(0x0094) + +/* + * Bit definition part + */ + +/* HDMI_INTC_CON */ +#define HDMI_INTC_EN_GLOBAL		(1 << 6) +#define HDMI_INTC_EN_HPD_PLUG		(1 << 3) +#define HDMI_INTC_EN_HPD_UNPLUG		(1 << 2) + +/* HDMI_INTC_FLAG */ +#define HDMI_INTC_FLAG_HPD_PLUG		(1 << 3) +#define HDMI_INTC_FLAG_HPD_UNPLUG	(1 << 2) + +/* HDMI_PHY_RSTOUT */ +#define HDMI_PHY_SW_RSTOUT		(1 << 0) + +/* HDMI_CORE_RSTOUT */ +#define HDMI_CORE_SW_RSTOUT		(1 << 0) + +/* HDMI_CON_0 */ +#define HDMI_BLUE_SCR_EN		(1 << 5) +#define HDMI_ASP_EN			(1 << 2) +#define HDMI_ASP_DIS			(0 << 2) +#define HDMI_ASP_MASK			(1 << 2) +#define HDMI_EN				(1 << 0) + +/* HDMI_CON_2 */ +#define HDMI_VID_PREAMBLE_DIS		(1 << 5) +#define HDMI_GUARD_BAND_DIS		(1 << 1) + +/* HDMI_PHY_STATUS */ +#define HDMI_PHY_STATUS_READY		(1 << 0) + +/* HDMI_MODE_SEL */ +#define HDMI_MODE_HDMI_EN		(1 << 1) +#define HDMI_MODE_DVI_EN		(1 << 0) +#define HDMI_MODE_MASK			(3 << 0) + +/* HDMI_TG_CMD */ +#define HDMI_TG_EN			(1 << 0) +#define HDMI_FIELD_EN			(1 << 1) + + +/* HDMI Version 1.4 */ +/* Control registers */ +/* #define HDMI_INTC_CON		HDMI_CTRL_BASE(0x0000) */ +/* #define HDMI_INTC_FLAG		HDMI_CTRL_BASE(0x0004) */ +#define HDMI_HDCP_KEY_LOAD		HDMI_CTRL_BASE(0x0008) +/* #define HDMI_HPD_STATUS		HDMI_CTRL_BASE(0x000C) */ +#define HDMI_INTC_CON_1			HDMI_CTRL_BASE(0x0010) +#define HDMI_INTC_FLAG_1		HDMI_CTRL_BASE(0x0014) +#define HDMI_PHY_STATUS_0		
HDMI_CTRL_BASE(0x0020) +#define HDMI_PHY_STATUS_CMU		HDMI_CTRL_BASE(0x0024) +#define HDMI_PHY_STATUS_PLL		HDMI_CTRL_BASE(0x0028) +#define HDMI_PHY_CON_0			HDMI_CTRL_BASE(0x0030) +#define HDMI_HPD_CTRL			HDMI_CTRL_BASE(0x0040) +#define HDMI_HPD_ST			HDMI_CTRL_BASE(0x0044) +#define HDMI_HPD_TH_X			HDMI_CTRL_BASE(0x0050) +#define HDMI_AUDIO_CLKSEL		HDMI_CTRL_BASE(0x0070) +#define HDMI_PHY_RSTOUT			HDMI_CTRL_BASE(0x0074) +#define HDMI_PHY_VPLL			HDMI_CTRL_BASE(0x0078) +#define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x007C) +#define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0080) + +/* PHY Control bit definition */ + +/* HDMI_PHY_CON_0 */ +#define HDMI_PHY_POWER_OFF_EN		(1 << 0) + +/* Video related registers */ +#define HDMI_YMAX			HDMI_CORE_BASE(0x0060) +#define HDMI_YMIN			HDMI_CORE_BASE(0x0064) +#define HDMI_CMAX			HDMI_CORE_BASE(0x0068) +#define HDMI_CMIN			HDMI_CORE_BASE(0x006C) + +#define HDMI_V2_BLANK_0			HDMI_CORE_BASE(0x00B0) +#define HDMI_V2_BLANK_1			HDMI_CORE_BASE(0x00B4) +#define HDMI_V1_BLANK_0			HDMI_CORE_BASE(0x00B8) +#define HDMI_V1_BLANK_1			HDMI_CORE_BASE(0x00BC) + +#define HDMI_V_LINE_0			HDMI_CORE_BASE(0x00C0) +#define HDMI_V_LINE_1			HDMI_CORE_BASE(0x00C4) +#define HDMI_H_LINE_0			HDMI_CORE_BASE(0x00C8) +#define HDMI_H_LINE_1			HDMI_CORE_BASE(0x00CC) + +#define HDMI_HSYNC_POL			HDMI_CORE_BASE(0x00E0) + +#define HDMI_V_BLANK_F0_0		HDMI_CORE_BASE(0x0110) +#define HDMI_V_BLANK_F0_1		HDMI_CORE_BASE(0x0114) +#define HDMI_V_BLANK_F1_0		HDMI_CORE_BASE(0x0118) +#define HDMI_V_BLANK_F1_1		HDMI_CORE_BASE(0x011C) + +#define HDMI_H_SYNC_START_0		HDMI_CORE_BASE(0x0120) +#define HDMI_H_SYNC_START_1		HDMI_CORE_BASE(0x0124) +#define HDMI_H_SYNC_END_0		HDMI_CORE_BASE(0x0128) +#define HDMI_H_SYNC_END_1		HDMI_CORE_BASE(0x012C) + +#define HDMI_V_SYNC_LINE_BEF_2_0	HDMI_CORE_BASE(0x0130) +#define HDMI_V_SYNC_LINE_BEF_2_1	HDMI_CORE_BASE(0x0134) +#define HDMI_V_SYNC_LINE_BEF_1_0	HDMI_CORE_BASE(0x0138) +#define HDMI_V_SYNC_LINE_BEF_1_1	HDMI_CORE_BASE(0x013C) + +#define 
HDMI_V_SYNC_LINE_AFT_2_0	HDMI_CORE_BASE(0x0140) +#define HDMI_V_SYNC_LINE_AFT_2_1	HDMI_CORE_BASE(0x0144) +#define HDMI_V_SYNC_LINE_AFT_1_0	HDMI_CORE_BASE(0x0148) +#define HDMI_V_SYNC_LINE_AFT_1_1	HDMI_CORE_BASE(0x014C) + +#define HDMI_V_SYNC_LINE_AFT_PXL_2_0	HDMI_CORE_BASE(0x0150) +#define HDMI_V_SYNC_LINE_AFT_PXL_2_1	HDMI_CORE_BASE(0x0154) +#define HDMI_V_SYNC_LINE_AFT_PXL_1_0	HDMI_CORE_BASE(0x0158) +#define HDMI_V_SYNC_LINE_AFT_PXL_1_1	HDMI_CORE_BASE(0x015C) + +#define HDMI_V_BLANK_F2_0		HDMI_CORE_BASE(0x0160) +#define HDMI_V_BLANK_F2_1		HDMI_CORE_BASE(0x0164) +#define HDMI_V_BLANK_F3_0		HDMI_CORE_BASE(0x0168) +#define HDMI_V_BLANK_F3_1		HDMI_CORE_BASE(0x016C) +#define HDMI_V_BLANK_F4_0		HDMI_CORE_BASE(0x0170) +#define HDMI_V_BLANK_F4_1		HDMI_CORE_BASE(0x0174) +#define HDMI_V_BLANK_F5_0		HDMI_CORE_BASE(0x0178) +#define HDMI_V_BLANK_F5_1		HDMI_CORE_BASE(0x017C) + +#define HDMI_V_SYNC_LINE_AFT_3_0	HDMI_CORE_BASE(0x0180) +#define HDMI_V_SYNC_LINE_AFT_3_1	HDMI_CORE_BASE(0x0184) +#define HDMI_V_SYNC_LINE_AFT_4_0	HDMI_CORE_BASE(0x0188) +#define HDMI_V_SYNC_LINE_AFT_4_1	HDMI_CORE_BASE(0x018C) +#define HDMI_V_SYNC_LINE_AFT_5_0	HDMI_CORE_BASE(0x0190) +#define HDMI_V_SYNC_LINE_AFT_5_1	HDMI_CORE_BASE(0x0194) +#define HDMI_V_SYNC_LINE_AFT_6_0	HDMI_CORE_BASE(0x0198) +#define HDMI_V_SYNC_LINE_AFT_6_1	HDMI_CORE_BASE(0x019C) + +#define HDMI_V_SYNC_LINE_AFT_PXL_3_0	HDMI_CORE_BASE(0x01A0) +#define HDMI_V_SYNC_LINE_AFT_PXL_3_1	HDMI_CORE_BASE(0x01A4) +#define HDMI_V_SYNC_LINE_AFT_PXL_4_0	HDMI_CORE_BASE(0x01A8) +#define HDMI_V_SYNC_LINE_AFT_PXL_4_1	HDMI_CORE_BASE(0x01AC) +#define HDMI_V_SYNC_LINE_AFT_PXL_5_0	HDMI_CORE_BASE(0x01B0) +#define HDMI_V_SYNC_LINE_AFT_PXL_5_1	HDMI_CORE_BASE(0x01B4) +#define HDMI_V_SYNC_LINE_AFT_PXL_6_0	HDMI_CORE_BASE(0x01B8) +#define HDMI_V_SYNC_LINE_AFT_PXL_6_1	HDMI_CORE_BASE(0x01BC) + +#define HDMI_VACT_SPACE_1_0		HDMI_CORE_BASE(0x01C0) +#define HDMI_VACT_SPACE_1_1		HDMI_CORE_BASE(0x01C4) +#define HDMI_VACT_SPACE_2_0		HDMI_CORE_BASE(0x01C8) +#define 
HDMI_VACT_SPACE_2_1		HDMI_CORE_BASE(0x01CC) +#define HDMI_VACT_SPACE_3_0		HDMI_CORE_BASE(0x01D0) +#define HDMI_VACT_SPACE_3_1		HDMI_CORE_BASE(0x01D4) +#define HDMI_VACT_SPACE_4_0		HDMI_CORE_BASE(0x01D8) +#define HDMI_VACT_SPACE_4_1		HDMI_CORE_BASE(0x01DC) +#define HDMI_VACT_SPACE_5_0		HDMI_CORE_BASE(0x01E0) +#define HDMI_VACT_SPACE_5_1		HDMI_CORE_BASE(0x01E4) +#define HDMI_VACT_SPACE_6_0		HDMI_CORE_BASE(0x01E8) +#define HDMI_VACT_SPACE_6_1		HDMI_CORE_BASE(0x01EC) + +#define HDMI_GCP_CON			HDMI_CORE_BASE(0x0200) +#define HDMI_GCP_BYTE1			HDMI_CORE_BASE(0x0210) +#define HDMI_GCP_BYTE2			HDMI_CORE_BASE(0x0214) +#define HDMI_GCP_BYTE3			HDMI_CORE_BASE(0x0218) + +/* Audio related registers */ +#define HDMI_ASP_CON			HDMI_CORE_BASE(0x0300) +#define HDMI_ASP_SP_FLAT		HDMI_CORE_BASE(0x0304) +#define HDMI_ASP_CHCFG0			HDMI_CORE_BASE(0x0310) +#define HDMI_ASP_CHCFG1			HDMI_CORE_BASE(0x0314) +#define HDMI_ASP_CHCFG2			HDMI_CORE_BASE(0x0318) +#define HDMI_ASP_CHCFG3			HDMI_CORE_BASE(0x031C) + +#define HDMI_ACR_CON			HDMI_CORE_BASE(0x0400) +#define HDMI_ACR_MCTS0			HDMI_CORE_BASE(0x0410) +#define HDMI_ACR_MCTS1			HDMI_CORE_BASE(0x0414) +#define HDMI_ACR_MCTS2			HDMI_CORE_BASE(0x0418) +#define HDMI_ACR_CTS0			HDMI_CORE_BASE(0x0420) +#define HDMI_ACR_CTS1			HDMI_CORE_BASE(0x0424) +#define HDMI_ACR_CTS2			HDMI_CORE_BASE(0x0428) +#define HDMI_ACR_N0			HDMI_CORE_BASE(0x0430) +#define HDMI_ACR_N1			HDMI_CORE_BASE(0x0434) +#define HDMI_ACR_N2			HDMI_CORE_BASE(0x0438) + +/* Packet related registers */ +#define HDMI_ACP_CON			HDMI_CORE_BASE(0x0500) +#define HDMI_ACP_TYPE			HDMI_CORE_BASE(0x0514) +#define HDMI_ACP_DATA(n)		HDMI_CORE_BASE(0x0520 + 4 * (n)) + +#define HDMI_ISRC_CON			HDMI_CORE_BASE(0x0600) +#define HDMI_ISRC1_HEADER1		HDMI_CORE_BASE(0x0614) +#define HDMI_ISRC1_DATA(n)		HDMI_CORE_BASE(0x0620 + 4 * (n)) +#define HDMI_ISRC2_DATA(n)		HDMI_CORE_BASE(0x06A0 + 4 * (n)) + +#define HDMI_AVI_CON			HDMI_CORE_BASE(0x0700) +#define HDMI_AVI_HEADER0		HDMI_CORE_BASE(0x0710) +#define 
HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714) +#define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718) +#define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C) +#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n-1)) + +#define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800) +#define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810) +#define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814) +#define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818) +#define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C) +#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n-1)) + +#define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900) +#define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C) +#define HDMI_MPG_DATA(n)		HDMI_CORE_BASE(0x0920 + 4 * (n)) + +#define HDMI_SPD_CON			HDMI_CORE_BASE(0x0A00) +#define HDMI_SPD_HEADER0		HDMI_CORE_BASE(0x0A10) +#define HDMI_SPD_HEADER1		HDMI_CORE_BASE(0x0A14) +#define HDMI_SPD_HEADER2		HDMI_CORE_BASE(0x0A18) +#define HDMI_SPD_DATA(n)		HDMI_CORE_BASE(0x0A20 + 4 * (n)) + +#define HDMI_GAMUT_CON			HDMI_CORE_BASE(0x0B00) +#define HDMI_GAMUT_HEADER0		HDMI_CORE_BASE(0x0B10) +#define HDMI_GAMUT_HEADER1		HDMI_CORE_BASE(0x0B14) +#define HDMI_GAMUT_HEADER2		HDMI_CORE_BASE(0x0B18) +#define HDMI_GAMUT_METADATA(n)		HDMI_CORE_BASE(0x0B20 + 4 * (n)) + +#define HDMI_VSI_CON			HDMI_CORE_BASE(0x0C00) +#define HDMI_VSI_HEADER0		HDMI_CORE_BASE(0x0C10) +#define HDMI_VSI_HEADER1		HDMI_CORE_BASE(0x0C14) +#define HDMI_VSI_HEADER2		HDMI_CORE_BASE(0x0C18) +#define HDMI_VSI_DATA(n)		HDMI_CORE_BASE(0x0C20 + 4 * (n)) + +#define HDMI_DC_CONTROL			HDMI_CORE_BASE(0x0D00) +#define HDMI_VIDEO_PATTERN_GEN		HDMI_CORE_BASE(0x0D04) + +#define HDMI_AN_SEED_SEL		HDMI_CORE_BASE(0x0E48) +#define HDMI_AN_SEED_0			HDMI_CORE_BASE(0x0E58) +#define HDMI_AN_SEED_1			HDMI_CORE_BASE(0x0E5C) +#define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60) +#define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64) + +/* AVI bit definition */ +#define HDMI_AVI_CON_DO_NOT_TRANSMIT	(0 << 1) +#define HDMI_AVI_CON_EVERY_VSYNC	(1 << 1) + +#define AVI_ACTIVE_FORMAT_VALID	(1 << 4) +#define 
AVI_UNDERSCANNED_DISPLAY_VALID	(1 << 1) + +/* AUI bit definition */ +#define HDMI_AUI_CON_NO_TRAN		(0 << 0) + +/* VSI bit definition */ +#define HDMI_VSI_CON_DO_NOT_TRANSMIT	(0 << 0) + +/* HDCP related registers */ +#define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n)) +#define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n)) + +#define HDMI_HDCP_KSV_LIST_CON		HDMI_CORE_BASE(0x7064) +#define HDMI_HDCP_SHA_RESULT		HDMI_CORE_BASE(0x7070) +#define HDMI_HDCP_CTRL1			HDMI_CORE_BASE(0x7080) +#define HDMI_HDCP_CTRL2			HDMI_CORE_BASE(0x7084) +#define HDMI_HDCP_CHECK_RESULT		HDMI_CORE_BASE(0x7090) +#define HDMI_HDCP_BKSV(n)		HDMI_CORE_BASE(0x70A0 + 4 * (n)) +#define HDMI_HDCP_AKSV(n)		HDMI_CORE_BASE(0x70C0 + 4 * (n)) +#define HDMI_HDCP_AN(n)			HDMI_CORE_BASE(0x70E0 + 4 * (n)) + +#define HDMI_HDCP_BCAPS			HDMI_CORE_BASE(0x7100) +#define HDMI_HDCP_BSTATUS_0		HDMI_CORE_BASE(0x7110) +#define HDMI_HDCP_BSTATUS_1		HDMI_CORE_BASE(0x7114) +#define HDMI_HDCP_RI_0			HDMI_CORE_BASE(0x7140) +#define HDMI_HDCP_RI_1			HDMI_CORE_BASE(0x7144) +#define HDMI_HDCP_I2C_INT		HDMI_CORE_BASE(0x7180) +#define HDMI_HDCP_AN_INT		HDMI_CORE_BASE(0x7190) +#define HDMI_HDCP_WDT_INT		HDMI_CORE_BASE(0x71A0) +#define HDMI_HDCP_RI_INT		HDMI_CORE_BASE(0x71B0) +#define HDMI_HDCP_RI_COMPARE_0		HDMI_CORE_BASE(0x71D0) +#define HDMI_HDCP_RI_COMPARE_1		HDMI_CORE_BASE(0x71D4) +#define HDMI_HDCP_FRAME_COUNT		HDMI_CORE_BASE(0x71E0) + +#define HDMI_RGB_ROUND_EN		HDMI_CORE_BASE(0xD500) +#define HDMI_VACT_SPACE_R_0		HDMI_CORE_BASE(0xD504) +#define HDMI_VACT_SPACE_R_1		HDMI_CORE_BASE(0xD508) +#define HDMI_VACT_SPACE_G_0		HDMI_CORE_BASE(0xD50C) +#define HDMI_VACT_SPACE_G_1		HDMI_CORE_BASE(0xD510) +#define HDMI_VACT_SPACE_B_0		HDMI_CORE_BASE(0xD514) +#define HDMI_VACT_SPACE_B_1		HDMI_CORE_BASE(0xD518) + +#define HDMI_BLUE_SCREEN_B_0		HDMI_CORE_BASE(0xD520) +#define HDMI_BLUE_SCREEN_B_1		HDMI_CORE_BASE(0xD524) +#define HDMI_BLUE_SCREEN_G_0		HDMI_CORE_BASE(0xD528) +#define HDMI_BLUE_SCREEN_G_1		
HDMI_CORE_BASE(0xD52C) +#define HDMI_BLUE_SCREEN_R_0		HDMI_CORE_BASE(0xD530) +#define HDMI_BLUE_SCREEN_R_1		HDMI_CORE_BASE(0xD534) + +/* HDMI I2S register */ +#define HDMI_I2S_CLK_CON		HDMI_I2S_BASE(0x000) +#define HDMI_I2S_CON_1			HDMI_I2S_BASE(0x004) +#define HDMI_I2S_CON_2			HDMI_I2S_BASE(0x008) +#define HDMI_I2S_PIN_SEL_0		HDMI_I2S_BASE(0x00c) +#define HDMI_I2S_PIN_SEL_1		HDMI_I2S_BASE(0x010) +#define HDMI_I2S_PIN_SEL_2		HDMI_I2S_BASE(0x014) +#define HDMI_I2S_PIN_SEL_3		HDMI_I2S_BASE(0x018) +#define HDMI_I2S_DSD_CON		HDMI_I2S_BASE(0x01c) +#define HDMI_I2S_MUX_CON		HDMI_I2S_BASE(0x020) +#define HDMI_I2S_CH_ST_CON		HDMI_I2S_BASE(0x024) +#define HDMI_I2S_CH_ST_0		HDMI_I2S_BASE(0x028) +#define HDMI_I2S_CH_ST_1		HDMI_I2S_BASE(0x02c) +#define HDMI_I2S_CH_ST_2		HDMI_I2S_BASE(0x030) +#define HDMI_I2S_CH_ST_3		HDMI_I2S_BASE(0x034) +#define HDMI_I2S_CH_ST_4		HDMI_I2S_BASE(0x038) +#define HDMI_I2S_CH_ST_SH_0		HDMI_I2S_BASE(0x03c) +#define HDMI_I2S_CH_ST_SH_1		HDMI_I2S_BASE(0x040) +#define HDMI_I2S_CH_ST_SH_2		HDMI_I2S_BASE(0x044) +#define HDMI_I2S_CH_ST_SH_3		HDMI_I2S_BASE(0x048) +#define HDMI_I2S_CH_ST_SH_4		HDMI_I2S_BASE(0x04c) +#define HDMI_I2S_MUX_CH			HDMI_I2S_BASE(0x054) +#define HDMI_I2S_MUX_CUV		HDMI_I2S_BASE(0x058) + +/* I2S bit definition */ + +/* I2S_CLK_CON */ +#define HDMI_I2S_CLK_DIS		(0) +#define HDMI_I2S_CLK_EN			(1) + +/* I2S_CON_1 */ +#define HDMI_I2S_SCLK_FALLING_EDGE	(0 << 1) +#define HDMI_I2S_SCLK_RISING_EDGE	(1 << 1) +#define HDMI_I2S_L_CH_LOW_POL		(0) +#define HDMI_I2S_L_CH_HIGH_POL		(1) + +/* I2S_CON_2 */ +#define HDMI_I2S_MSB_FIRST_MODE		(0 << 6) +#define HDMI_I2S_LSB_FIRST_MODE		(1 << 6) +#define HDMI_I2S_BIT_CH_32FS		(0 << 4) +#define HDMI_I2S_BIT_CH_48FS		(1 << 4) +#define HDMI_I2S_BIT_CH_RESERVED	(2 << 4) +#define HDMI_I2S_SDATA_16BIT		(1 << 2) +#define HDMI_I2S_SDATA_20BIT		(2 << 2) +#define HDMI_I2S_SDATA_24BIT		(3 << 2) +#define HDMI_I2S_BASIC_FORMAT		(0) +#define HDMI_I2S_L_JUST_FORMAT		(2) +#define HDMI_I2S_R_JUST_FORMAT		(3) +#define 
HDMI_I2S_CON_2_CLR		(~(0xFF)) +#define HDMI_I2S_SET_BIT_CH(x)		(((x) & 0x7) << 4) +#define HDMI_I2S_SET_SDATA_BIT(x)	(((x) & 0x7) << 2) + +/* I2S_PIN_SEL_0 */ +#define HDMI_I2S_SEL_SCLK(x)		(((x) & 0x7) << 4) +#define HDMI_I2S_SEL_LRCK(x)		((x) & 0x7) + +/* I2S_PIN_SEL_1 */ +#define HDMI_I2S_SEL_SDATA1(x)		(((x) & 0x7) << 4) +#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7) + +/* I2S_PIN_SEL_2 */ +#define HDMI_I2S_SEL_SDATA3(x)		(((x) & 0x7) << 4) +#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7) + +/* I2S_PIN_SEL_3 */ +#define HDMI_I2S_SEL_DSD(x)		((x) & 0x7) + +/* I2S_DSD_CON */ +#define HDMI_I2S_DSD_CLK_RI_EDGE	(1 << 1) +#define HDMI_I2S_DSD_CLK_FA_EDGE	(0 << 1) +#define HDMI_I2S_DSD_ENABLE		(1) +#define HDMI_I2S_DSD_DISABLE		(0) + +/* I2S_MUX_CON */ +#define HDMI_I2S_NOISE_FILTER_ZERO	(0 << 5) +#define HDMI_I2S_NOISE_FILTER_2_STAGE	(1 << 5) +#define HDMI_I2S_NOISE_FILTER_3_STAGE	(2 << 5) +#define HDMI_I2S_NOISE_FILTER_4_STAGE	(3 << 5) +#define HDMI_I2S_NOISE_FILTER_5_STAGE	(4 << 5) +#define HDMI_I2S_IN_DISABLE		(1 << 4) +#define HDMI_I2S_IN_ENABLE		(0 << 4) +#define HDMI_I2S_AUD_SPDIF		(0 << 2) +#define HDMI_I2S_AUD_I2S		(1 << 2) +#define HDMI_I2S_AUD_DSD		(2 << 2) +#define HDMI_I2S_CUV_SPDIF_ENABLE	(0 << 1) +#define HDMI_I2S_CUV_I2S_ENABLE		(1 << 1) +#define HDMI_I2S_MUX_DISABLE		(0) +#define HDMI_I2S_MUX_ENABLE		(1) +#define HDMI_I2S_MUX_CON_CLR		(~(0xFF)) + +/* I2S_CH_ST_CON */ +#define HDMI_I2S_CH_STATUS_RELOAD	(1) +#define HDMI_I2S_CH_ST_CON_CLR		(~(1)) + +/* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */ +#define HDMI_I2S_CH_STATUS_MODE_0	(0 << 6) +#define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH	(0 << 3) +#define HDMI_I2S_2AUD_CH_WITH_PREEMPH	(1 << 3) +#define HDMI_I2S_DEFAULT_EMPHASIS	(0 << 3) +#define HDMI_I2S_COPYRIGHT		(0 << 2) +#define HDMI_I2S_NO_COPYRIGHT		(1 << 2) +#define HDMI_I2S_LINEAR_PCM		(0 << 1) +#define HDMI_I2S_NO_LINEAR_PCM		(1 << 1) +#define HDMI_I2S_CONSUMER_FORMAT	(0) +#define HDMI_I2S_PROF_FORMAT		(1) +#define HDMI_I2S_CH_ST_0_CLR		(~(0xFF)) + +/* I2S_CH_ST_1 / 
I2S_CH_ST_SH_1 */ +#define HDMI_I2S_CD_PLAYER		(0x00) +#define HDMI_I2S_DAT_PLAYER		(0x03) +#define HDMI_I2S_DCC_PLAYER		(0x43) +#define HDMI_I2S_MINI_DISC_PLAYER	(0x49) + +/* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */ +#define HDMI_I2S_CHANNEL_NUM_MASK	(0xF << 4) +#define HDMI_I2S_SOURCE_NUM_MASK	(0xF) +#define HDMI_I2S_SET_CHANNEL_NUM(x)	(((x) & (0xF)) << 4) +#define HDMI_I2S_SET_SOURCE_NUM(x)	((x) & (0xF)) + +/* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */ +#define HDMI_I2S_CLK_ACCUR_LEVEL_1	(1 << 4) +#define HDMI_I2S_CLK_ACCUR_LEVEL_2	(0 << 4) +#define HDMI_I2S_CLK_ACCUR_LEVEL_3	(2 << 4) +#define HDMI_I2S_SMP_FREQ_44_1		(0x0) +#define HDMI_I2S_SMP_FREQ_48		(0x2) +#define HDMI_I2S_SMP_FREQ_32		(0x3) +#define HDMI_I2S_SMP_FREQ_96		(0xA) +#define HDMI_I2S_SET_SMP_FREQ(x)	((x) & (0xF)) + +/* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */ +#define HDMI_I2S_ORG_SMP_FREQ_44_1	(0xF << 4) +#define HDMI_I2S_ORG_SMP_FREQ_88_2	(0x7 << 4) +#define HDMI_I2S_ORG_SMP_FREQ_22_05	(0xB << 4) +#define HDMI_I2S_ORG_SMP_FREQ_176_4	(0x3 << 4) +#define HDMI_I2S_WORD_LEN_NOT_DEFINE	(0x0 << 1) +#define HDMI_I2S_WORD_LEN_MAX24_20BITS	(0x1 << 1) +#define HDMI_I2S_WORD_LEN_MAX24_22BITS	(0x2 << 1) +#define HDMI_I2S_WORD_LEN_MAX24_23BITS	(0x4 << 1) +#define HDMI_I2S_WORD_LEN_MAX24_24BITS	(0x5 << 1) +#define HDMI_I2S_WORD_LEN_MAX24_21BITS	(0x6 << 1) +#define HDMI_I2S_WORD_LEN_MAX20_16BITS	(0x1 << 1) +#define HDMI_I2S_WORD_LEN_MAX20_18BITS	(0x2 << 1) +#define HDMI_I2S_WORD_LEN_MAX20_19BITS	(0x4 << 1) +#define HDMI_I2S_WORD_LEN_MAX20_20BITS	(0x5 << 1) +#define HDMI_I2S_WORD_LEN_MAX20_17BITS	(0x6 << 1) +#define HDMI_I2S_WORD_LEN_MAX_24BITS	(1) +#define HDMI_I2S_WORD_LEN_MAX_20BITS	(0) + +/* I2S_MUX_CH */ +#define HDMI_I2S_CH3_R_EN		(1 << 7) +#define HDMI_I2S_CH3_L_EN		(1 << 6) +#define HDMI_I2S_CH3_EN			(3 << 6) +#define HDMI_I2S_CH2_R_EN		(1 << 5) +#define HDMI_I2S_CH2_L_EN		(1 << 4) +#define HDMI_I2S_CH2_EN			(3 << 4) +#define HDMI_I2S_CH1_R_EN		(1 << 3) +#define HDMI_I2S_CH1_L_EN		(1 << 2) +#define HDMI_I2S_CH1_EN			(3 << 2) 
+#define HDMI_I2S_CH0_R_EN		(1 << 1) +#define HDMI_I2S_CH0_L_EN		(1) +#define HDMI_I2S_CH0_EN			(3) +#define HDMI_I2S_CH_ALL_EN		(0xFF) +#define HDMI_I2S_MUX_CH_CLR		(~HDMI_I2S_CH_ALL_EN) + +/* I2S_MUX_CUV */ +#define HDMI_I2S_CUV_R_EN		(1 << 1) +#define HDMI_I2S_CUV_L_EN		(1) +#define HDMI_I2S_CUV_RL_EN		(0x03) + +/* I2S_CUV_L_R */ +#define HDMI_I2S_CUV_R_DATA_MASK	(0x7 << 4) +#define HDMI_I2S_CUV_L_DATA_MASK	(0x7) + +/* Timing generator registers */ +/* TG configure/status registers */ +#define HDMI_TG_VACT_ST3_L		HDMI_TG_BASE(0x0068) +#define HDMI_TG_VACT_ST3_H		HDMI_TG_BASE(0x006c) +#define HDMI_TG_VACT_ST4_L		HDMI_TG_BASE(0x0070) +#define HDMI_TG_VACT_ST4_H		HDMI_TG_BASE(0x0074) +#define HDMI_TG_3D			HDMI_TG_BASE(0x00F0) + +/* HDMI PHY Registers Offsets*/ +#define HDMIPHY_POWER		(0x74 >> 2) +#define HDMIPHY_MODE_SET_DONE		(0x7c >> 2) + +/* HDMI PHY Values */ +#define HDMI_PHY_POWER_ON              0x80 +#define HDMI_PHY_POWER_OFF             0xff + +/* HDMI PHY Values */ +#define HDMI_PHY_DISABLE_MODE_SET	0x80 +#define HDMI_PHY_ENABLE_MODE_SET	0x00 + +/* PMU Registers for PHY */ +#define PMU_HDMI_PHY_CONTROL		0x700 +#define PMU_HDMI_PHY_ENABLE_BIT		BIT(0) + +#endif /* SAMSUNG_REGS_HDMI_H */ diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h new file mode 100644 index 00000000000..5f32e1a2941 --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-mixer.h @@ -0,0 +1,152 @@ +/* + * + *  Cloned from drivers/media/video/s5p-tv/regs-mixer.h + * + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Mixer register header file for Samsung Mixer driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ +#ifndef SAMSUNG_REGS_MIXER_H +#define SAMSUNG_REGS_MIXER_H + +/* + * Register part + */ +#define MXR_STATUS			0x0000 +#define MXR_CFG				0x0004 +#define MXR_INT_EN			0x0008 +#define MXR_INT_STATUS			0x000C +#define MXR_LAYER_CFG			0x0010 +#define MXR_VIDEO_CFG			0x0014 +#define MXR_GRAPHIC0_CFG		0x0020 +#define MXR_GRAPHIC0_BASE		0x0024 +#define MXR_GRAPHIC0_SPAN		0x0028 +#define MXR_GRAPHIC0_SXY		0x002C +#define MXR_GRAPHIC0_WH			0x0030 +#define MXR_GRAPHIC0_DXY		0x0034 +#define MXR_GRAPHIC0_BLANK		0x0038 +#define MXR_GRAPHIC1_CFG		0x0040 +#define MXR_GRAPHIC1_BASE		0x0044 +#define MXR_GRAPHIC1_SPAN		0x0048 +#define MXR_GRAPHIC1_SXY		0x004C +#define MXR_GRAPHIC1_WH			0x0050 +#define MXR_GRAPHIC1_DXY		0x0054 +#define MXR_GRAPHIC1_BLANK		0x0058 +#define MXR_BG_CFG			0x0060 +#define MXR_BG_COLOR0			0x0064 +#define MXR_BG_COLOR1			0x0068 +#define MXR_BG_COLOR2			0x006C +#define MXR_CM_COEFF_Y			0x0080 +#define MXR_CM_COEFF_CB			0x0084 +#define MXR_CM_COEFF_CR			0x0088 +#define MXR_MO				0x0304 +#define MXR_RESOLUTION			0x0310 + +#define MXR_GRAPHIC0_BASE_S		0x2024 +#define MXR_GRAPHIC1_BASE_S		0x2044 + +/* for parametrized access to layer registers */ +#define MXR_GRAPHIC_CFG(i)		(0x0020 + (i) * 0x20) +#define MXR_GRAPHIC_BASE(i)		(0x0024 + (i) * 0x20) +#define MXR_GRAPHIC_SPAN(i)		(0x0028 + (i) * 0x20) +#define MXR_GRAPHIC_SXY(i)		(0x002C + (i) * 0x20) +#define MXR_GRAPHIC_WH(i)		(0x0030 + (i) * 0x20) +#define MXR_GRAPHIC_DXY(i)		(0x0034 + (i) * 0x20) +#define MXR_GRAPHIC_BLANK(i)		(0x0038 + (i) * 0x20) +#define MXR_GRAPHIC_BASE_S(i)		(0x2024 + (i) * 0x20) + +/* + * Bit definition part + */ + +/* generates mask for range of bits */ +#define MXR_MASK(high_bit, low_bit) \ +	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit)) + +#define MXR_MASK_VAL(val, high_bit, low_bit) \ +	(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit)) + +/* bits for MXR_STATUS */ +#define MXR_STATUS_SOFT_RESET		(1 << 8) +#define MXR_STATUS_16_BURST		(1 << 7) +#define 
MXR_STATUS_BURST_MASK		(1 << 7) +#define MXR_STATUS_BIG_ENDIAN		(1 << 3) +#define MXR_STATUS_ENDIAN_MASK		(1 << 3) +#define MXR_STATUS_SYNC_ENABLE		(1 << 2) +#define MXR_STATUS_REG_IDLE		(1 << 1) +#define MXR_STATUS_REG_RUN		(1 << 0) + +/* bits for MXR_CFG */ +#define MXR_CFG_LAYER_UPDATE		(1 << 31) +#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29) +#define MXR_CFG_RGB601_0_255		(0 << 9) +#define MXR_CFG_RGB601_16_235		(1 << 9) +#define MXR_CFG_RGB709_0_255		(2 << 9) +#define MXR_CFG_RGB709_16_235		(3 << 9) +#define MXR_CFG_RGB_FMT_MASK		0x600 +#define MXR_CFG_OUT_YUV444		(0 << 8) +#define MXR_CFG_OUT_RGB888		(1 << 8) +#define MXR_CFG_OUT_MASK		(1 << 8) +#define MXR_CFG_DST_SDO			(0 << 7) +#define MXR_CFG_DST_HDMI		(1 << 7) +#define MXR_CFG_DST_MASK		(1 << 7) +#define MXR_CFG_SCAN_HD_720		(0 << 6) +#define MXR_CFG_SCAN_HD_1080		(1 << 6) +#define MXR_CFG_GRP1_ENABLE		(1 << 5) +#define MXR_CFG_GRP0_ENABLE		(1 << 4) +#define MXR_CFG_VP_ENABLE		(1 << 3) +#define MXR_CFG_SCAN_INTERLACE		(0 << 2) +#define MXR_CFG_SCAN_PROGRASSIVE	(1 << 2) +#define MXR_CFG_SCAN_NTSC		(0 << 1) +#define MXR_CFG_SCAN_PAL		(1 << 1) +#define MXR_CFG_SCAN_SD			(0 << 0) +#define MXR_CFG_SCAN_HD			(1 << 0) +#define MXR_CFG_SCAN_MASK		0x47 + +/* bits for MXR_GRAPHICn_CFG */ +#define MXR_GRP_CFG_COLOR_KEY_DISABLE	(1 << 21) +#define MXR_GRP_CFG_BLEND_PRE_MUL	(1 << 20) +#define MXR_GRP_CFG_WIN_BLEND_EN	(1 << 17) +#define MXR_GRP_CFG_PIXEL_BLEND_EN	(1 << 16) +#define MXR_GRP_CFG_FORMAT_VAL(x)	MXR_MASK_VAL(x, 11, 8) +#define MXR_GRP_CFG_FORMAT_MASK		MXR_GRP_CFG_FORMAT_VAL(~0) +#define MXR_GRP_CFG_ALPHA_VAL(x)	MXR_MASK_VAL(x, 7, 0) + +/* bits for MXR_GRAPHICn_WH */ +#define MXR_GRP_WH_H_SCALE(x)		MXR_MASK_VAL(x, 28, 28) +#define MXR_GRP_WH_V_SCALE(x)		MXR_MASK_VAL(x, 12, 12) +#define MXR_GRP_WH_WIDTH(x)		MXR_MASK_VAL(x, 26, 16) +#define MXR_GRP_WH_HEIGHT(x)		MXR_MASK_VAL(x, 10, 0) + +/* bits for MXR_RESOLUTION */ +#define MXR_MXR_RES_HEIGHT(x)		MXR_MASK_VAL(x, 26, 16) +#define MXR_MXR_RES_WIDTH(x)		
MXR_MASK_VAL(x, 10, 0) + +/* bits for MXR_GRAPHICn_SXY */ +#define MXR_GRP_SXY_SX(x)		MXR_MASK_VAL(x, 26, 16) +#define MXR_GRP_SXY_SY(x)		MXR_MASK_VAL(x, 10, 0) + +/* bits for MXR_GRAPHICn_DXY */ +#define MXR_GRP_DXY_DX(x)		MXR_MASK_VAL(x, 26, 16) +#define MXR_GRP_DXY_DY(x)		MXR_MASK_VAL(x, 10, 0) + +/* bits for MXR_INT_EN */ +#define MXR_INT_EN_VSYNC		(1 << 11) +#define MXR_INT_EN_ALL			(0x0f << 8) + +/* bit for MXR_INT_STATUS */ +#define MXR_INT_CLEAR_VSYNC		(1 << 11) +#define MXR_INT_STATUS_VSYNC		(1 << 0) + +/* bit for MXR_LAYER_CFG */ +#define MXR_LAYER_CFG_GRP1_VAL(x)	MXR_MASK_VAL(x, 11, 8) +#define MXR_LAYER_CFG_GRP0_VAL(x)	MXR_MASK_VAL(x, 7, 4) +#define MXR_LAYER_CFG_VP_VAL(x)		MXR_MASK_VAL(x, 3, 0) + +#endif /* SAMSUNG_REGS_MIXER_H */ + diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h new file mode 100644 index 00000000000..a09ac6e180d --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-rotator.h @@ -0,0 +1,73 @@ +/* drivers/gpu/drm/exynos/regs-rotator.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + *		http://www.samsung.com/ + * + * Register definition file for Samsung Rotator Interface (Rotator) driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef EXYNOS_REGS_ROTATOR_H +#define EXYNOS_REGS_ROTATOR_H + +/* Configuration */ +#define ROT_CONFIG			0x00 +#define ROT_CONFIG_IRQ			(3 << 8) + +/* Image Control */ +#define ROT_CONTROL			0x10 +#define ROT_CONTROL_PATTERN_WRITE	(1 << 16) +#define ROT_CONTROL_FMT_YCBCR420_2P	(1 << 8) +#define ROT_CONTROL_FMT_RGB888		(6 << 8) +#define ROT_CONTROL_FMT_MASK		(7 << 8) +#define ROT_CONTROL_FLIP_VERTICAL	(2 << 6) +#define ROT_CONTROL_FLIP_HORIZONTAL	(3 << 6) +#define ROT_CONTROL_FLIP_MASK		(3 << 6) +#define ROT_CONTROL_ROT_90		(1 << 4) +#define ROT_CONTROL_ROT_180		(2 << 4) +#define ROT_CONTROL_ROT_270		(3 << 4) +#define ROT_CONTROL_ROT_MASK		(3 << 4) +#define ROT_CONTROL_START		(1 << 0) + +/* Status */ +#define ROT_STATUS			0x20 +#define ROT_STATUS_IRQ_PENDING(x)	(1 << (x)) +#define ROT_STATUS_IRQ(x)		(((x) >> 8) & 0x3) +#define ROT_STATUS_IRQ_VAL_COMPLETE	1 +#define ROT_STATUS_IRQ_VAL_ILLEGAL	2 + +/* Buffer Address */ +#define ROT_SRC_BUF_ADDR(n)		(0x30 + ((n) << 2)) +#define ROT_DST_BUF_ADDR(n)		(0x50 + ((n) << 2)) + +/* Buffer Size */ +#define ROT_SRC_BUF_SIZE		0x3c +#define ROT_DST_BUF_SIZE		0x5c +#define ROT_SET_BUF_SIZE_H(x)		((x) << 16) +#define ROT_SET_BUF_SIZE_W(x)		((x) << 0) +#define ROT_GET_BUF_SIZE_H(x)		((x) >> 16) +#define ROT_GET_BUF_SIZE_W(x)		((x) & 0xffff) + +/* Crop Position */ +#define ROT_SRC_CROP_POS		0x40 +#define ROT_DST_CROP_POS		0x60 +#define ROT_CROP_POS_Y(x)		((x) << 16) +#define ROT_CROP_POS_X(x)		((x) << 0) + +/* Source Crop Size */ +#define ROT_SRC_CROP_SIZE		0x44 +#define ROT_SRC_CROP_SIZE_H(x)		((x) << 16) +#define ROT_SRC_CROP_SIZE_W(x)		((x) << 0) + +/* Round to nearest aligned value */ +#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask)) +/* Minimum limit value */ +#define ROT_MIN(min, mask)		(((min) + ~(mask)) & (mask)) +/* Maximum limit value */ +#define ROT_MAX(max, mask)		((max) & (mask)) + +#endif /* EXYNOS_REGS_ROTATOR_H */ + diff --git a/drivers/gpu/drm/exynos/regs-vp.h 
b/drivers/gpu/drm/exynos/regs-vp.h new file mode 100644 index 00000000000..10b737af0a7 --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-vp.h @@ -0,0 +1,91 @@ +/* + * + *  Cloned from drivers/media/video/s5p-tv/regs-vp.h + * + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. + *		http://www.samsung.com/ + * + * Video processor register header file for Samsung Mixer driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef SAMSUNG_REGS_VP_H +#define SAMSUNG_REGS_VP_H + +/* + * Register part + */ + +#define VP_ENABLE			0x0000 +#define VP_SRESET			0x0004 +#define VP_SHADOW_UPDATE		0x0008 +#define VP_FIELD_ID			0x000C +#define VP_MODE				0x0010 +#define VP_IMG_SIZE_Y			0x0014 +#define VP_IMG_SIZE_C			0x0018 +#define VP_PER_RATE_CTRL		0x001C +#define VP_TOP_Y_PTR			0x0028 +#define VP_BOT_Y_PTR			0x002C +#define VP_TOP_C_PTR			0x0030 +#define VP_BOT_C_PTR			0x0034 +#define VP_ENDIAN_MODE			0x03CC +#define VP_SRC_H_POSITION		0x0044 +#define VP_SRC_V_POSITION		0x0048 +#define VP_SRC_WIDTH			0x004C +#define VP_SRC_HEIGHT			0x0050 +#define VP_DST_H_POSITION		0x0054 +#define VP_DST_V_POSITION		0x0058 +#define VP_DST_WIDTH			0x005C +#define VP_DST_HEIGHT			0x0060 +#define VP_H_RATIO			0x0064 +#define VP_V_RATIO			0x0068 +#define VP_POLY8_Y0_LL			0x006C +#define VP_POLY4_Y0_LL			0x00EC +#define VP_POLY4_C0_LL			0x012C + +/* + * Bit definition part + */ + +/* generates mask for range of bits */ + +#define VP_MASK(high_bit, low_bit) \ +	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit)) + +#define VP_MASK_VAL(val, high_bit, low_bit) \ +	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit)) + + /* VP_ENABLE */ +#define VP_ENABLE_ON			(1 << 0) + +/* VP_SRESET */ +#define VP_SRESET_PROCESSING		(1 << 0) + +/* VP_SHADOW_UPDATE */ +#define VP_SHADOW_UPDATE_ENABLE		(1 << 0) + +/* VP_MODE */ +#define VP_MODE_NV12			(0 << 6) 
+#define VP_MODE_NV21			(1 << 6) +#define VP_MODE_LINE_SKIP		(1 << 5) +#define VP_MODE_MEM_LINEAR		(0 << 4) +#define VP_MODE_MEM_TILED		(1 << 4) +#define VP_MODE_FMT_MASK		(5 << 4) +#define VP_MODE_FIELD_ID_AUTO_TOGGLING	(1 << 2) +#define VP_MODE_2D_IPC			(1 << 1) + +/* VP_IMG_SIZE_Y */ +/* VP_IMG_SIZE_C */ +#define VP_IMG_HSIZE(x)			VP_MASK_VAL(x, 29, 16) +#define VP_IMG_VSIZE(x)			VP_MASK_VAL(x, 13, 0) + +/* VP_SRC_H_POSITION */ +#define VP_SRC_H_POSITION_VAL(x)	VP_MASK_VAL(x, 14, 4) + +/* VP_ENDIAN_MODE */ +#define VP_ENDIAN_MODE_LITTLE		(1 << 0) + +#endif /* SAMSUNG_REGS_VP_H */  | 
