Submit #3031 » radeon47fast.patch

davshao, 04/26/2017 08:57 PM

View differences:

sys/conf/files
dev/drm/radeon/radeon_agp.c optional radeon drm
dev/drm/radeon/radeon_asic.c optional radeon drm
dev/drm/radeon/radeon_atombios.c optional radeon drm
dev/drm/radeon/radeon_audio.c optional radeon drm
dev/drm/radeon/radeon_benchmark.c optional radeon drm
dev/drm/radeon/radeon_bios.c optional radeon drm
dev/drm/radeon/radeon_clocks.c optional radeon drm
......
dev/drm/radeon/radeon_cursor.c optional radeon drm
dev/drm/radeon/radeon_device.c optional radeon drm
dev/drm/radeon/radeon_display.c optional radeon drm
dev/drm/radeon/radeon_dp_auxch.c optional radeon drm
dev/drm/radeon/radeon_drv.c optional radeon drm
dev/drm/radeon/radeon_encoders.c optional radeon drm
dev/drm/radeon/radeon_fb.c optional radeon drm
sys/dev/drm/radeon/Makefile
radeon_agp.c \
radeon_asic.c \
radeon_atombios.c \
radeon_audio.c \
radeon_benchmark.c \
radeon_bios.c \
radeon_clocks.c \
......
radeon_cursor.c \
radeon_device.c \
radeon_display.c \
radeon_dp_auxch.c \
radeon_drv.c \
radeon_encoders.c \
radeon_fb.c \
sys/dev/drm/radeon/atom.c
int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
0xFF000000 };
static uint32_t atom_arg_mask[8] = {
0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
......
kprintf(" ");
}
#define ATOM_DEBUG_PRINT(...) do if (atom_debug) { kprintf(__FILE__ __VA_ARGS__); } while (0)
#define ATOM_SDEBUG_PRINT(...) do if (atom_debug) { kprintf(__FILE__); debug_print_spaces(debug_depth); kprintf(__VA_ARGS__); } while (0)
#else
#define ATOM_DEBUG_PRINT(...) do { } while (0)
#define ATOM_SDEBUG_PRINT(...) do { } while (0)
#ifdef DEBUG
#undef DEBUG
#endif
#ifdef SDEBUG
#undef SDEBUG
#endif
#define DEBUG(...) do if (atom_debug) { kprintf(__FILE__ __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (atom_debug) { kprintf(__FILE__); debug_print_spaces(debug_depth); kprintf(__VA_ARGS__); } while (0)
#else /* !ATOM_DEBUG */
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif /* ATOM_DEBUG */
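The macro hunk above drops the DragonFly-local ATOM_DEBUG_PRINT/ATOM_SDEBUG_PRINT spellings in favour of the DEBUG/SDEBUG names used by the upstream Linux atom.c, adding #undef guards so they cannot collide with an existing DEBUG/SDEBUG definition. A minimal user-space sketch of the same pattern, using printf in place of kprintf (atom_debug, debug_depth, main and the sample values are stand-ins, not the driver code):

#include <stdio.h>

static int atom_debug = 1;   /* runtime switch, like the one in atom.c */
static int debug_depth = 2;  /* indentation level for nested table execution */

#define ATOM_DEBUG           /* comment out to get the no-op variants */
#ifdef ATOM_DEBUG
#ifdef DEBUG
#undef DEBUG                 /* avoid clashing with a pre-existing DEBUG macro */
#endif
/* each message is prefixed with the source file via string-literal pasting */
#define DEBUG(...) do { if (atom_debug) printf(__FILE__ __VA_ARGS__); } while (0)
#define SDEBUG(...) do { if (atom_debug) { printf(__FILE__); \
        for (int i = 0; i < debug_depth; i++) printf(" "); \
        printf(__VA_ARGS__); } } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

int main(void)
{
    DEBUG(" REG[0x%04X]\n", 0x1234);
    SDEBUG(" dst: 0x%08X\n", 0xdeadbeefu);
    return 0;
}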
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
......
idx = U16(*ptr);
(*ptr) += 2;
if (print)
ATOM_DEBUG_PRINT("REG[0x%04X]", idx);
DEBUG("REG[0x%04X]", idx);
idx += gctx->reg_block;
switch (gctx->io_mode) {
case ATOM_IO_MM:
......
* tables, noticed on a DEC Alpha. */
val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
ATOM_DEBUG_PRINT("PS[0x%02X,0x%04X]", idx, val);
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
case ATOM_ARG_WS:
idx = U8(*ptr);
(*ptr)++;
if (print)
ATOM_DEBUG_PRINT("WS[0x%02X]", idx);
DEBUG("WS[0x%02X]", idx);
switch (idx) {
case ATOM_WS_QUOTIENT:
val = gctx->divmul[0];
......
(*ptr) += 2;
if (print) {
if (gctx->data_block)
ATOM_DEBUG_PRINT("ID[0x%04X+%04X]", idx, gctx->data_block);
DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
else
ATOM_DEBUG_PRINT("ID[0x%04X]", idx);
DEBUG("ID[0x%04X]", idx);
}
val = U32(idx + gctx->data_block);
break;
......
} else
val = gctx->scratch[(gctx->fb_base / 4) + idx];
if (print)
ATOM_DEBUG_PRINT("FB[0x%02X]", idx);
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_IMM:
switch (align) {
......
val = U32(*ptr);
(*ptr) += 4;
if (print)
ATOM_DEBUG_PRINT("IMM 0x%08X\n", val);
DEBUG("IMM 0x%08X\n", val);
return val;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
......
val = U16(*ptr);
(*ptr) += 2;
if (print)
ATOM_DEBUG_PRINT("IMM 0x%04X\n", val);
DEBUG("IMM 0x%04X\n", val);
return val;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
......
val = U8(*ptr);
(*ptr)++;
if (print)
ATOM_DEBUG_PRINT("IMM 0x%02X\n", val);
DEBUG("IMM 0x%02X\n", val);
return val;
}
return 0;
......
idx = U8(*ptr);
(*ptr)++;
if (print)
ATOM_DEBUG_PRINT("PLL[0x%02X]", idx);
DEBUG("PLL[0x%02X]", idx);
val = gctx->card->pll_read(gctx->card, idx);
break;
case ATOM_ARG_MC:
idx = U8(*ptr);
(*ptr)++;
if (print)
ATOM_DEBUG_PRINT("MC[0x%02X]", idx);
DEBUG("MC[0x%02X]", idx);
val = gctx->card->mc_read(gctx->card, idx);
break;
}
......
if (print)
switch (align) {
case ATOM_SRC_DWORD:
ATOM_DEBUG_PRINT(".[31:0] -> 0x%08X\n", val);
DEBUG(".[31:0] -> 0x%08X\n", val);
break;
case ATOM_SRC_WORD0:
ATOM_DEBUG_PRINT(".[15:0] -> 0x%04X\n", val);
DEBUG(".[15:0] -> 0x%04X\n", val);
break;
case ATOM_SRC_WORD8:
ATOM_DEBUG_PRINT(".[23:8] -> 0x%04X\n", val);
DEBUG(".[23:8] -> 0x%04X\n", val);
break;
case ATOM_SRC_WORD16:
ATOM_DEBUG_PRINT(".[31:16] -> 0x%04X\n", val);
DEBUG(".[31:16] -> 0x%04X\n", val);
break;
case ATOM_SRC_BYTE0:
ATOM_DEBUG_PRINT(".[7:0] -> 0x%02X\n", val);
DEBUG(".[7:0] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE8:
ATOM_DEBUG_PRINT(".[15:8] -> 0x%02X\n", val);
DEBUG(".[15:8] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE16:
ATOM_DEBUG_PRINT(".[23:16] -> 0x%02X\n", val);
DEBUG(".[23:16] -> 0x%02X\n", val);
break;
case ATOM_SRC_BYTE24:
ATOM_DEBUG_PRINT(".[31:24] -> 0x%02X\n", val);
DEBUG(".[31:24] -> 0x%02X\n", val);
break;
}
return val;
......
case ATOM_ARG_REG:
idx = U16(*ptr);
(*ptr) += 2;
ATOM_DEBUG_PRINT("REG[0x%04X]", idx);
DEBUG("REG[0x%04X]", idx);
idx += gctx->reg_block;
switch (gctx->io_mode) {
case ATOM_IO_MM:
......
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
ATOM_DEBUG_PRINT("PS[0x%02X]", idx);
DEBUG("PS[0x%02X]", idx);
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
idx = U8(*ptr);
(*ptr)++;
ATOM_DEBUG_PRINT("WS[0x%02X]", idx);
DEBUG("WS[0x%02X]", idx);
switch (idx) {
case ATOM_WS_QUOTIENT:
gctx->divmul[0] = val;
......
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
} else
gctx->scratch[(gctx->fb_base / 4) + idx] = val;
ATOM_DEBUG_PRINT("FB[0x%02X]", idx);
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
ATOM_DEBUG_PRINT("PLL[0x%02X]", idx);
DEBUG("PLL[0x%02X]", idx);
gctx->card->pll_write(gctx->card, idx, val);
break;
case ATOM_ARG_MC:
idx = U8(*ptr);
(*ptr)++;
ATOM_DEBUG_PRINT("MC[0x%02X]", idx);
DEBUG("MC[0x%02X]", idx);
gctx->card->mc_write(gctx->card, idx, val);
return;
}
switch (align) {
case ATOM_SRC_DWORD:
ATOM_DEBUG_PRINT(".[31:0] <- 0x%08X\n", old_val);
DEBUG(".[31:0] <- 0x%08X\n", old_val);
break;
case ATOM_SRC_WORD0:
ATOM_DEBUG_PRINT(".[15:0] <- 0x%04X\n", old_val);
DEBUG(".[15:0] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_WORD8:
ATOM_DEBUG_PRINT(".[23:8] <- 0x%04X\n", old_val);
DEBUG(".[23:8] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_WORD16:
ATOM_DEBUG_PRINT(".[31:16] <- 0x%04X\n", old_val);
DEBUG(".[31:16] <- 0x%04X\n", old_val);
break;
case ATOM_SRC_BYTE0:
ATOM_DEBUG_PRINT(".[7:0] <- 0x%02X\n", old_val);
DEBUG(".[7:0] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE8:
ATOM_DEBUG_PRINT(".[15:8] <- 0x%02X\n", old_val);
DEBUG(".[15:8] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE16:
ATOM_DEBUG_PRINT(".[23:16] <- 0x%02X\n", old_val);
DEBUG(".[23:16] <- 0x%02X\n", old_val);
break;
case ATOM_SRC_BYTE24:
ATOM_DEBUG_PRINT(".[31:24] <- 0x%02X\n", old_val);
DEBUG(".[31:24] <- 0x%02X\n", old_val);
break;
}
}
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst += src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst &= src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
int r = 0;
if (idx < ATOM_TABLE_NAMES_CNT)
ATOM_SDEBUG_PRINT(" table: %d (%s)\n", idx, atom_table_names[idx]);
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
ATOM_SDEBUG_PRINT(" table: %d\n", idx);
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
if (r) {
......
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}
......
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
ATOM_SDEBUG_PRINT(" src1: ");
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
ATOM_SDEBUG_PRINT(" src2: ");
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->cs_equal = (dst == src);
ctx->ctx->cs_above = (dst > src);
ATOM_SDEBUG_PRINT(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
ctx->ctx->cs_above ? "GT" : "LE");
}
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
unsigned count = U8((*ptr)++);
ATOM_SDEBUG_PRINT(" count: %d\n", count);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
else if (!drm_can_sleep())
......
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
ATOM_SDEBUG_PRINT(" src1: ");
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
ATOM_SDEBUG_PRINT(" src2: ");
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
if (src != 0) {
ctx->ctx->divmul[0] = dst / src;
......
break;
}
if (arg != ATOM_COND_ALWAYS)
ATOM_SDEBUG_PRINT(" taken: %s\n", execute ? "yes" : "no");
ATOM_SDEBUG_PRINT(" target: 0x%04X\n", target);
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = jiffies;
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, mask, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
ATOM_SDEBUG_PRINT(" mask: 0x%08x", mask);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" mask: 0x%08x", mask);
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst &= mask;
dst |= src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
atom_skip_dst(ctx, arg, attr, ptr);
saved = 0xCDCDCDCD;
}
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}
......
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
ATOM_SDEBUG_PRINT(" src1: ");
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
ATOM_SDEBUG_PRINT(" src2: ");
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->divmul[0] = dst * src;
}
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst |= src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t val = U8((*ptr)++);
ATOM_SDEBUG_PRINT("POST card output: 0x%02X\n", val);
SDEBUG("POST card output: 0x%02X\n", val);
}
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
......
{
int idx = U8(*ptr);
(*ptr)++;
ATOM_SDEBUG_PRINT(" block: %d\n", idx);
SDEBUG(" block: %d\n", idx);
if (!idx)
ctx->ctx->data_block = 0;
else if (idx == 255)
ctx->ctx->data_block = ctx->start;
else
ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
ATOM_SDEBUG_PRINT(" base: 0x%04X\n", ctx->ctx->data_block);
SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
ATOM_SDEBUG_PRINT(" fb_base: ");
SDEBUG(" fb_base: ");
ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}
......
case ATOM_PORT_ATI:
port = U16(*ptr);
if (port < ATOM_IO_NAMES_CNT)
ATOM_SDEBUG_PRINT(" port: %d (%s)\n", port, atom_io_names[port]);
SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
else
ATOM_SDEBUG_PRINT(" port: %d\n", port);
SDEBUG(" port: %d\n", port);
if (!port)
ctx->ctx->io_mode = ATOM_IO_MM;
else
......
{
ctx->ctx->reg_block = U16(*ptr);
(*ptr) += 2;
ATOM_SDEBUG_PRINT(" base: 0x%04X\n", ctx->ctx->reg_block);
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
......
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
ATOM_SDEBUG_PRINT(" shift: %d\n", shift);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
int dptr = *ptr;
attr &= 0x38;
attr |= atom_def_dst[attr >> 3] << 6;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
ATOM_SDEBUG_PRINT(" shift: %d\n", shift);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs to full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
ATOM_SDEBUG_PRINT(" shift: %d\n", shift);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
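The shift opcode above works on the full saved destination, shifts it, then re-applies the per-alignment mask and shift from atom_arg_mask[]/atom_arg_shift[] so only the addressed byte or word field survives. A stand-alone sketch of that mask/shift step (shl_field and the sample values are hypothetical; the tables copy the values from the top of atom.c):

#include <stdint.h>
#include <stdio.h>

/* per-alignment field masks and shifts, as defined at the top of atom.c */
static const uint32_t atom_arg_mask[8] = {
    0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
    0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
};
static const int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

/* shift the whole dword, then keep only the field selected by 'align',
 * right-justified -- the same sequence the shl handler performs */
static uint32_t shl_field(uint32_t saved, unsigned shift, int align)
{
    uint32_t dst = saved << shift;
    dst &= atom_arg_mask[align];
    dst >>= atom_arg_shift[align];
    return dst;
}

int main(void)
{
    /* align 2 corresponds to the .[23:8] word field seen in the debug output */
    printf("0x%04X\n", shl_field(0x00123400u, 4, 2));
    return 0;
}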
......
uint32_t saved, dst;
int dptr = *ptr;
uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
/* op needs to full dst value */
dst = saved;
shift = atom_get_src(ctx, attr, ptr);
ATOM_SDEBUG_PRINT(" shift: %d\n", shift);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
dst &= atom_arg_mask[dst_align];
dst >>= atom_arg_shift[dst_align];
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst -= src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
{
uint8_t attr = U8((*ptr)++);
uint32_t src, val, target;
ATOM_SDEBUG_PRINT(" switch: ");
SDEBUG(" switch: ");
src = atom_get_src(ctx, attr, ptr);
while (U16(*ptr) != ATOM_CASE_END)
if (U8(*ptr) == ATOM_CASE_MAGIC) {
(*ptr)++;
ATOM_SDEBUG_PRINT(" case: ");
SDEBUG(" case: ");
val =
atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
ptr);
target = U16(*ptr);
if (val == src) {
ATOM_SDEBUG_PRINT(" target: %04X\n", target);
SDEBUG(" target: %04X\n", target);
*ptr = ctx->start + target;
return;
}
......
{
uint8_t attr = U8((*ptr)++);
uint32_t dst, src;
ATOM_SDEBUG_PRINT(" src1: ");
SDEBUG(" src1: ");
dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
ATOM_SDEBUG_PRINT(" src2: ");
SDEBUG(" src2: ");
src = atom_get_src(ctx, attr, ptr);
ctx->ctx->cs_equal = ((dst & src) == 0);
ATOM_SDEBUG_PRINT(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}
static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
......
uint8_t attr = U8((*ptr)++);
uint32_t dst, src, saved;
int dptr = *ptr;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
ATOM_SDEBUG_PRINT(" src: ");
SDEBUG(" src: ");
src = atom_get_src(ctx, attr, ptr);
dst ^= src;
ATOM_SDEBUG_PRINT(" dst: ");
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
......
ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
ptr = base + ATOM_CT_CODE_PTR;
ATOM_SDEBUG_PRINT(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
......
while (1) {
op = CU8(ptr++);
if (op < ATOM_OP_NAMES_CNT)
ATOM_SDEBUG_PRINT("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
ATOM_SDEBUG_PRINT("[%d] @ 0x%04X\n", op, ptr - 1);
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
if (ectx.abort) {
DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
base, len, ws, ps, ptr - 1);
......
break;
}
debug_depth--;
ATOM_SDEBUG_PRINT("<<\n");
SDEBUG("<<\n");
free:
if (ws)
......
{
int r;
lockmgr(&ctx->mutex, LK_EXCLUSIVE);
mutex_lock(&ctx->mutex);
/* reset data block */
ctx->data_block = 0;
/* reset reg block */
......
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
lockmgr(&ctx->mutex, LK_RELEASE);
mutex_unlock(&ctx->mutex);
return r;
}
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
lockmgr(&ctx->scratch_mutex, LK_EXCLUSIVE);
mutex_lock(&ctx->scratch_mutex);
r = atom_execute_table_scratch_unlocked(ctx, index, params);
lockmgr(&ctx->scratch_mutex, LK_RELEASE);
mutex_unlock(&ctx->scratch_mutex);
return r;
}
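The two execute-table wrappers above move from DragonFly's lockmgr(..., LK_EXCLUSIVE)/lockmgr(..., LK_RELEASE) calls to the Linux mutex_lock()/mutex_unlock() names, presumably provided by a Linux-compatibility layer. A rough sketch of how such a shim could be built on lockmgr (the struct layout, header and inline helpers are assumptions, not the actual compat code):

/* hypothetical linux-compat shim: a struct mutex backed by a lockmgr lock */
#include <sys/lock.h>                 /* lockmgr(), LK_EXCLUSIVE, LK_RELEASE (assumed header) */

struct mutex {
    struct lock base;
};

static inline void mutex_lock(struct mutex *m)
{
    lockmgr(&m->base, LK_EXCLUSIVE);  /* exclusive acquire, may sleep */
}

static inline void mutex_unlock(struct mutex *m)
{
    lockmgr(&m->base, LK_RELEASE);    /* drop the lock */
}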
sys/dev/drm/radeon/atombios.h
typedef struct {
AMD_ACPI_DESCRIPTION_HEADER SHeader;
UCHAR TableUUID[16]; //0x24
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
ULONG Reserved[4]; //0x3C
}UEFI_ACPI_VFCT;
sys/dev/drm/radeon/atombios_crtc.c
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
if (dev->num_crtcs > radeon_crtc->crtc_id)
drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (dev->num_crtcs > radeon_crtc->crtc_id)
drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
......
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
misc |= ATOM_DOUBLE_CLOCK_MODE;
misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
......
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
misc |= ATOM_DOUBLE_CLOCK_MODE;
misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
......
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
&& !radeon_crtc->ss_enabled)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
......
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
if (ASIC_IS_AVIVO(rdev))
if (ASIC_IS_AVIVO(rdev) &&
rdev->family != CHIP_RS780 &&
rdev->family != CHIP_RS880)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
......
break;
}
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
......
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
if ((rdev->family >= CHIP_BONAIRE) &&
(crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
viewport_h *= 2;
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
......
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
......
else
WREG32(AVIVO_D2VGA_CONTROL, 0);
/* Make sure surface address is update at vertical blank rather than
* horizontal blank
*/
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
......
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
......
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
......
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* PPLL2 is exclusive to UNIPHYA on DCE61 */
if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
test_radeon_crtc->pll_id == ATOM_PPLL2)
continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
......
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
......
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* PPLL2 is exclusive to UNIPHYA on DCE61 */
if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
test_radeon_crtc->pll_id == ATOM_PPLL2)
continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
......
return pll;
}
/* otherwise, pick one of the plls */
if ((rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
/* KB/KV/ML has PPLL1 and PPLL2 */
if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
/* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
......
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
/* CI has PPLL0, PPLL1, and PPLL2 */
/* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
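The allocator above tracks which PPLLs are already driving a CRTC in a pll_in_use bit mask and hands out the first free one; the hunk also moves CHIP_KAVERI out of the KB/ML branch into the CI/KV branch that has PPLL0/PPLL1/PPLL2. A small stand-alone sketch of that bitmask selection (the PPLL numbering and the probe order beyond PPLL2 are illustrative, since the rest of the hunk is collapsed here):

#include <stdio.h>

enum { PPLL0, PPLL1, PPLL2, PPLL_INVALID = -1 };   /* stand-ins for ATOM_PPLLn */

/* return the first PLL in 'order' whose bit is clear in the in-use mask,
 * mirroring the pll_in_use & (1 << ATOM_PPLLn) checks above */
static int pick_pll(unsigned pll_in_use, const int *order, int n)
{
    for (int i = 0; i < n; i++)
        if (!(pll_in_use & (1u << order[i])))
            return order[i];
    return PPLL_INVALID;
}

int main(void)
{
    const int order[] = { PPLL2, PPLL1, PPLL0 };
    unsigned in_use = 1u << PPLL2;                 /* PPLL2 already taken */
    printf("picked PPLL%d\n", pick_pll(in_use, order, 3));
    return 0;
}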
......
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
radeon_cursor_reset(crtc);
/* update the hw version fpr dpm */
radeon_crtc->hw_mode = *adjusted_mode;
......
radeon_crtc->connector = NULL;
return false;
}
if (radeon_crtc->encoder) {
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
radeon_crtc->output_csc = radeon_encoder->output_csc;
}
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
......
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
(rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
sys/dev/drm/radeon/atombios_dp.c
memset(&args, 0, sizeof(args));
lockmgr(&chan->mutex, LK_EXCLUSIVE);
lockmgr(&rdev->mode_info.atom_context->scratch_mutex, LK_EXCLUSIVE);
mutex_lock(&chan->mutex);
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
......
r = recv_bytes;
done:
lockmgr(&rdev->mode_info.atom_context->scratch_mutex, LK_RELEASE);
lockmgr(&chan->mutex, LK_RELEASE);
mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);
return r;
}
......
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
static ssize_t
radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct radeon_i2c_chan *chan =
container_of(aux, struct radeon_i2c_chan, aux);
......
return -E2BIG;
tx_buf[0] = msg->address & 0xff;
tx_buf[1] = msg->address >> 8;
tx_buf[2] = msg->request << 4;
tx_buf[1] = (msg->address >> 8) & 0xff;
tx_buf[2] = (msg->request << 4) |
((msg->address >> 16) & 0xf);
tx_buf[3] = msg->size ? (msg->size - 1) : 0;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
case DP_AUX_I2C_WRITE_STATUS_UPDATE:
/* The atom implementation only supports writes with a max payload of
* 12 bytes since it uses 4 bits for the total count (header + payload)
* in the parameter space. The atom interface supports 16 byte
* payloads for reads. The hw itself supports up to 16 bytes of payload.
*/
if (WARN_ON_ONCE(msg->size > 12))
return -E2BIG;
/* tx_size needs to be 4 even for bare address packets since the atom
* table needs the info in tx_buf[3].
*/
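The tx_buf changes above let the atom AUX path carry a full 20-bit DPCD address: bits 15:8 go in byte 1, bits 19:16 share byte 2 with the 4-bit request code, and the (size - 1) count stays in byte 3. A user-space sketch of that header packing (pack_aux_header and the example values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* pack an AUX header the way the hunk builds tx_buf[0..3]: 20-bit address
 * across bytes 0-2, request in the high nibble of byte 2, size-1 in byte 3 */
static void pack_aux_header(uint8_t buf[4], uint32_t address,
                            uint8_t request, unsigned size)
{
    buf[0] = address & 0xff;
    buf[1] = (address >> 8) & 0xff;
    buf[2] = (uint8_t)(request << 4) | ((address >> 16) & 0xf);
    buf[3] = size ? (uint8_t)(size - 1) : 0;
}

int main(void)
{
    uint8_t buf[4];
    pack_aux_header(buf, 0x00100 /* DPCD address */, 0x9 /* request code */, 16);
    printf("%02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3]);
    return 0;
}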
......
void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
dig_connector->dp_i2c_bus->rec.hpd = radeon_connector->hpd.hpd; /* XXX check*/
dig_connector->dp_i2c_bus->aux.dev = radeon_connector->base.kdev;
dig_connector->dp_i2c_bus->aux.transfer = radeon_dp_aux_transfer;
}
int radeon_dp_i2c_aux_ch(device_t dev, int mode, u8 write_byte, u8 *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = device_get_softc(dev);
struct radeon_i2c_chan *auxch = algo_data->priv;
u16 address = algo_data->address;
u8 msg[5];
u8 reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes = 1;
int ret;
u8 ack;
/* Set up the address */
msg[0] = address;
msg[1] = address >> 8;
/* Set up the command byte */
if (mode & MODE_I2C_READ) {
msg[2] = DP_AUX_I2C_READ << 4;
msg_bytes = 4;
msg[3] = msg_bytes << 4;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
if (ASIC_IS_DCE5(rdev)) {
if (radeon_auxch)
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
else
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
} else {
msg[2] = DP_AUX_I2C_WRITE << 4;
msg_bytes = 5;
msg[3] = msg_bytes << 4;
msg[4] = write_byte;
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
}
/* special handling for start/stop */
if (mode & (MODE_I2C_START | MODE_I2C_STOP))
msg[3] = 3 << 4;
/* Set MOT bit for all but stop */
if ((mode & MODE_I2C_STOP) == 0)
msg[2] |= DP_AUX_I2C_MOT << 4;
for (retry = 0; retry < 7; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case DP_AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
usleep_range(500, 600);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
case DP_AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return (0); /* XXX: why 0 and not msg size? */
case DP_AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
usleep_range(400, 500);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
return -EREMOTEIO;
}
}
ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
if (!ret)
radeon_connector->ddc_bus->has_aux = true;
DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
}
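The hunk above retires the old byte-at-a-time radeon_dp_i2c_aux_ch() helper: radeon_dp_aux_init() now points ddc_bus->aux.transfer at radeon_dp_aux_transfer_native (DCE5+ with radeon_auxch set) or radeon_dp_aux_transfer_atom and registers the channel with drm_dp_aux_register(). The general shape of such a registration, sketched with a hypothetical callback (not the radeon code, and not a complete translation unit):

#include <drm/drm_dp_helper.h>

/* hypothetical transfer callback: carry out the AUX transaction described
 * by msg and return the number of payload bytes transferred */
static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
                               struct drm_dp_aux_msg *msg)
{
    /* ... program the hardware using msg->request, msg->address,
     * msg->buffer and msg->size, then fill in msg->reply ... */
    return msg->size;
}

static int my_aux_init(struct drm_dp_aux *aux, struct device *dev)
{
    aux->dev = dev;
    aux->transfer = my_aux_transfer;
    return drm_dp_aux_register(aux);   /* 0 on success */
}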
/***** general DP utility functions *****/
......
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
u8 train_set[4])
{
......
return bpc * 3;
}
/* get the max pix clock supported by the link rate and lane num */
static int dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
return (link_rate * lane_num * 8) / bpp;
}
/***** radeon specific DP functions *****/
static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
if (radeon_connector_is_dp12_capable(connector))
max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
else
max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
return max_link_rate;
}
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
*/
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
int radeon_dp_get_dp_link_config(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
unsigned pix_clock,
unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
}
return lane_num;
}
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
unsigned lane_num, i, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
return 270000;
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 162000;
max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 270000;
if (radeon_connector_is_dp12_capable(connector)) {
max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 540000;
}
ENCODER_OBJECT_ID_NUTMEG) {
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
max_pix_clock = (lane_num * 270000 * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
*dp_rate = 270000;
return 0;
}
}
} else {
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
if (max_pix_clock >= pix_clock) {
*dp_lanes = lane_num;
*dp_rate = link_rates[i];
return 0;
}
}
}
}
return radeon_dp_get_max_link_rate(connector, dpcd);
return -EINVAL;
}
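The new radeon_dp_get_dp_link_config() above walks the link rates from low to high and, for each rate, lane counts 1/2/4, returning the first combination whose throughput (rate * lanes * 8 / bpp) reaches the requested pixel clock, and -EINVAL when nothing fits (NUTMEG bridge chips stay pinned to a 270000 link rate). A user-space sketch of the same search with plain integers (pick_dp_config and the sample mode are hypothetical):

#include <stdio.h>

/* try link rates low-to-high and lane counts 1/2/4 until the resulting
 * bandwidth covers pix_clock, as radeon_dp_get_dp_link_config() now does */
static int pick_dp_config(unsigned pix_clock, unsigned bpp,
                          unsigned max_lane_num, unsigned max_link_rate,
                          unsigned *dp_lanes, unsigned *dp_rate)
{
    static const unsigned link_rates[3] = { 162000, 270000, 540000 };

    for (unsigned i = 0; i < 3 && link_rates[i] <= max_link_rate; i++) {
        for (unsigned lanes = 1; lanes <= max_lane_num; lanes <<= 1) {
            unsigned max_pix_clock = (lanes * link_rates[i] * 8) / bpp;
            if (max_pix_clock >= pix_clock) {
                *dp_lanes = lanes;
                *dp_rate = link_rates[i];
                return 0;
            }
        }
    }
    return -1;   /* the driver returns -EINVAL here */
}

int main(void)
{
    unsigned lanes, rate;
    /* 1080p-class pixel clock at 24 bpp; sink caps: up to 4 lanes, rate 270000 */
    if (pick_dp_config(148500, 24, 4, 270000, &lanes, &rate) == 0)
        printf("%u lane(s), link rate %u\n", lanes, rate);
    return 0;
}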
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
......
u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
radeon_connector->ddc_bus->rec.i2c_id, 0);
}
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
......
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_SINK_OUI, buf, 3) == 3)
if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hhx%02hhx%02hhx\n",
buf[0], buf[1], buf[2]);
if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hhx%02hhx%02hhx\n",
buf[0], buf[1], buf[2]);
}
......
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
int ret;
int ret, i;
char dpcd_hex_dump[DP_DPCD_SIZE * 3];
ret = drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(dig_connector->dpcd,
sizeof(dig_connector->dpcd),
dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
for (i = 0; i < 7; i++) {
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret == DP_DPCD_SIZE) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(dig_connector->dpcd,
sizeof(dig_connector->dpcd),
dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
radeon_dp_probe_oui(radeon_connector);
radeon_dp_probe_oui(radeon_connector);
return true;
return true;
}
}
dig_connector->dpcd[0] = 0;
return false;
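The DPCD probe above (radeon_dp_getdpcd() in the driver) now retries the read up to 7 times over radeon_connector->ddc_bus->aux and only caches the result when a full DP_DPCD_SIZE block comes back. A stand-alone sketch of that retry-until-complete pattern (read_fn is a hypothetical callback standing in for drm_dp_dpcd_read(), and 15 is an assumed stand-in for DP_DPCD_SIZE):

#include <stdbool.h>
#include <string.h>

#define DPCD_SIZE 15   /* assumed stand-in for DP_DPCD_SIZE */

/* retry a possibly-flaky read up to 7 times; keep the data only when a
 * complete block arrives, as the loop above does */
static bool read_dpcd(int (*read_fn)(unsigned char *buf, int len),
                      unsigned char dpcd[DPCD_SIZE])
{
    unsigned char msg[DPCD_SIZE];

    for (int i = 0; i < 7; i++) {
        if (read_fn(msg, DPCD_SIZE) == DPCD_SIZE) {
            memcpy(dpcd, msg, DPCD_SIZE);
            return true;
        }
    }
    dpcd[0] = 0;       /* mark the cached DPCD invalid, as the driver does */
    return false;
}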
......
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
......
}
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
if (drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux,
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
......
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int ret;
if (!radeon_connector->con_priv)
return;
......
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock,
&dig_connector->dp_lane_count,
&dig_connector->dp_clock);
if (ret) {
dig_connector->dp_clock = 0;
dig_connector->dp_lane_count = 0;
}
}
}
......
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
unsigned dp_clock, dp_lanes;
int ret;
if ((mode->clock > 340000) &&
(!radeon_connector_is_dp12_capable(connector)))
......
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
mode->clock,
&dp_lanes,
&dp_clock);
if (ret)
return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
......
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if (drm_dp_dpcd_read_link_status(&dig->dp_i2c_bus->aux, link_status) <= 0)
if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
<= 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
......
/* power up/down the sink */
... This diff was truncated because it exceeds the maximum size that can be displayed.