| /* |
| * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
| * |
| * This source code is subject to the terms of the BSD 2 Clause License and |
| * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
| * was not distributed with this source code in the LICENSE file, you can |
| * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
| * Media Patent License 1.0 was not distributed with this source code in the |
| * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
| */ |
| |
| #include <math.h> |
| |
| #include "config/aom_dsp_rtcd.h" |
| #include "config/av1_rtcd.h" |
| |
| #include "aom_ports/mem.h" |
| #include "av1/common/av1_inv_txfm1d_cfg.h" |
| #include "av1/common/av1_txfm.h" |
| #include "av1/common/blockd.h" |
| #include "av1/common/enums.h" |
| #include "av1/common/idct.h" |
| |
| int av1_get_tx_scale(const TX_SIZE tx_size) { |
| const int pels = tx_size_2d[tx_size]; |
| // Largest possible pels is 4096 (64x64). |
| return (pels > 256) + (pels > 1024); |
| } |
| |
| // NOTE: The implementation of all inverses need to be aware of the fact |
| // that input and output could be the same buffer. |
| |
| // idct |
| void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, |
| int eob, int bd) { |
| if (eob > 1) |
| av1_highbd_iwht4x4_16_add(input, dest, stride, bd); |
| else |
| av1_highbd_iwht4x4_1_add(input, dest, stride, bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_4x4_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); |
| int eob = txfm_param->eob; |
| int bd = txfm_param->bd; |
| int lossless = txfm_param->lossless; |
| const int32_t *src = cast_to_int32(input); |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| if (lossless) { |
| assert(tx_type == DCT_DCT); |
| av1_highbd_iwht4x4_add(input, dest, stride, eob, bd); |
| return; |
| } |
| |
| av1_inv_txfm2d_add_4x4_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_4x8_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_4x8_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_8x4_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_8x4_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_16x32_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_16x32_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_32x16_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_32x16_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_16x4_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_16x4_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_4x16_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_4x16_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_32x8_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_32x8_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_8x32_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_8x32_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_32x64_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_32x64_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_64x32_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_64x32_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_16x64_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_16x64_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_64x16_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_64x16_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_8x8_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| int bd = txfm_param->bd; |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| const int32_t *src = cast_to_int32(input); |
| |
| av1_inv_txfm2d_add_8x8_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_16x16_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| int bd = txfm_param->bd; |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| const int32_t *src = cast_to_int32(input); |
| |
| av1_inv_txfm2d_add_16x16_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, |
| bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_8x16_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_8x16_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_16x8_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int32_t *src = cast_to_int32(input); |
| av1_inv_txfm2d_add_16x8_c(src, CONVERT_TO_SHORTPTR(dest), stride, |
| txfm_param->tx_type, txfm_param->bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_32x32_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int bd = txfm_param->bd; |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| const int32_t *src = cast_to_int32(input); |
| |
| av1_inv_txfm2d_add_32x32_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, |
| bd); |
| } |
| |
| void av1_highbd_inv_txfm_add_64x64_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| const int bd = txfm_param->bd; |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| const int32_t *src = cast_to_int32(input); |
| assert(tx_type == DCT_DCT); |
| av1_inv_txfm2d_add_64x64_c(src, CONVERT_TO_SHORTPTR(dest), stride, tx_type, |
| bd); |
| } |
| |
| static void init_txfm_param(const MACROBLOCKD *xd, int plane, TX_SIZE tx_size, |
| TX_TYPE tx_type, int eob, int reduced_tx_set, |
| TxfmParam *txfm_param) { |
| (void)plane; |
| txfm_param->tx_type = tx_type; |
| txfm_param->tx_size = tx_size; |
| txfm_param->eob = eob; |
| txfm_param->lossless = xd->lossless[xd->mi[0]->segment_id]; |
| txfm_param->bd = xd->bd; |
| txfm_param->is_hbd = is_cur_buf_hbd(xd); |
| txfm_param->tx_set_type = av1_get_ext_tx_set_type( |
| txfm_param->tx_size, is_inter_block(xd->mi[0]), reduced_tx_set); |
| } |
| |
| void av1_highbd_inv_txfm_add_c(const tran_low_t *input, uint8_t *dest, |
| int stride, const TxfmParam *txfm_param) { |
| assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); |
| const TX_SIZE tx_size = txfm_param->tx_size; |
| switch (tx_size) { |
| case TX_32X32: |
| av1_highbd_inv_txfm_add_32x32_c(input, dest, stride, txfm_param); |
| break; |
| case TX_16X16: |
| av1_highbd_inv_txfm_add_16x16_c(input, dest, stride, txfm_param); |
| break; |
| case TX_8X8: |
| av1_highbd_inv_txfm_add_8x8_c(input, dest, stride, txfm_param); |
| break; |
| case TX_4X8: |
| av1_highbd_inv_txfm_add_4x8_c(input, dest, stride, txfm_param); |
| break; |
| case TX_8X4: |
| av1_highbd_inv_txfm_add_8x4_c(input, dest, stride, txfm_param); |
| break; |
| case TX_8X16: |
| av1_highbd_inv_txfm_add_8x16_c(input, dest, stride, txfm_param); |
| break; |
| case TX_16X8: |
| av1_highbd_inv_txfm_add_16x8_c(input, dest, stride, txfm_param); |
| break; |
| case TX_16X32: |
| av1_highbd_inv_txfm_add_16x32_c(input, dest, stride, txfm_param); |
| break; |
| case TX_32X16: |
| av1_highbd_inv_txfm_add_32x16_c(input, dest, stride, txfm_param); |
| break; |
| case TX_64X64: |
| av1_highbd_inv_txfm_add_64x64_c(input, dest, stride, txfm_param); |
| break; |
| case TX_32X64: |
| av1_highbd_inv_txfm_add_32x64_c(input, dest, stride, txfm_param); |
| break; |
| case TX_64X32: |
| av1_highbd_inv_txfm_add_64x32_c(input, dest, stride, txfm_param); |
| break; |
| case TX_16X64: |
| av1_highbd_inv_txfm_add_16x64_c(input, dest, stride, txfm_param); |
| break; |
| case TX_64X16: |
| av1_highbd_inv_txfm_add_64x16_c(input, dest, stride, txfm_param); |
| break; |
| case TX_4X4: |
| // this is like av1_short_idct4x4 but has a special case around eob<=1 |
| // which is significant (not just an optimization) for the lossless |
| // case. |
| av1_highbd_inv_txfm_add_4x4_c(input, dest, stride, txfm_param); |
| break; |
| case TX_16X4: |
| av1_highbd_inv_txfm_add_16x4_c(input, dest, stride, txfm_param); |
| break; |
| case TX_4X16: |
| av1_highbd_inv_txfm_add_4x16_c(input, dest, stride, txfm_param); |
| break; |
| case TX_8X32: |
| av1_highbd_inv_txfm_add_8x32_c(input, dest, stride, txfm_param); |
| break; |
| case TX_32X8: |
| av1_highbd_inv_txfm_add_32x8_c(input, dest, stride, txfm_param); |
| break; |
| default: assert(0 && "Invalid transform size"); break; |
| } |
| } |
| |
| void av1_inv_txfm_add_c(const tran_low_t *dqcoeff, uint8_t *dst, int stride, |
| const TxfmParam *txfm_param) { |
| const TX_SIZE tx_size = txfm_param->tx_size; |
| DECLARE_ALIGNED(32, uint16_t, tmp[MAX_TX_SQUARE]); |
| int tmp_stride = MAX_TX_SIZE; |
| int w = tx_size_wide[tx_size]; |
| int h = tx_size_high[tx_size]; |
| for (int r = 0; r < h; ++r) { |
| for (int c = 0; c < w; ++c) { |
| tmp[r * tmp_stride + c] = dst[r * stride + c]; |
| } |
| } |
| |
| av1_highbd_inv_txfm_add(dqcoeff, CONVERT_TO_BYTEPTR(tmp), tmp_stride, |
| txfm_param); |
| |
| for (int r = 0; r < h; ++r) { |
| for (int c = 0; c < w; ++c) { |
| if (dst[r * stride + c] != 253) |
| { |
| printf("-"); |
| dst[r * stride + c] = (uint8_t)tmp[r * tmp_stride + c]; |
| } |
| } |
| } |
| } |
| |
| void av1_inverse_transform_block(const MACROBLOCKD *xd, |
| const tran_low_t *dqcoeff, int plane, |
| TX_TYPE tx_type, TX_SIZE tx_size, uint8_t *dst, |
| int stride, int eob, int reduced_tx_set) { |
| if (!eob) return; |
| |
| assert(eob <= av1_get_max_eob(tx_size)); |
| |
| TxfmParam txfm_param; |
| init_txfm_param(xd, plane, tx_size, tx_type, eob, reduced_tx_set, |
| &txfm_param); |
| assert(av1_ext_tx_used[txfm_param.tx_set_type][txfm_param.tx_type]); |
| |
| if (txfm_param.is_hbd) { |
| av1_highbd_inv_txfm_add(dqcoeff, dst, stride, &txfm_param); |
| } else { |
| av1_inv_txfm_add(dqcoeff, dst, stride, &txfm_param); |
| } |
| } |
| |
| |
// Inverse 4x4 Walsh-Hadamard transform, writing the result DIRECTLY into
// `dst` as 16-bit samples (note: this variant stores, it does not add to an
// existing predictor). Rows are transformed into a temporary block first,
// then columns are transformed straight into the destination.
void av1_highbd_iwht4x4_16_c(const tran_low_t *input, int16_t *dst, int stride, int bd) {
  /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
     0.5 shifts per pixel. */
  int i;
  tran_low_t output[16];  // intermediate block after the row pass
  tran_low_t a1, b1, c1, d1, e1;
  const tran_low_t *ip = input;
  tran_low_t *op = output;

  // Row pass: undo the encoder's unit-quant scaling, then apply the inverse
  // WHT butterfly. Note the input ordering (0,1,2,3) -> (a,c,d,b).
  for (i = 0; i < 4; i++) {
    a1 = ip[0] >> UNIT_QUANT_SHIFT;
    c1 = ip[1] >> UNIT_QUANT_SHIFT;
    d1 = ip[2] >> UNIT_QUANT_SHIFT;
    b1 = ip[3] >> UNIT_QUANT_SHIFT;
    a1 += c1;
    d1 -= b1;
    e1 = (a1 - d1) >> 1;  // shared intermediate; statement order matters
    b1 = e1 - b1;
    c1 = e1 - c1;
    a1 -= b1;
    d1 += c1;

    op[0] = a1;
    op[1] = b1;
    op[2] = c1;
    op[3] = d1;
    ip += 4;
    op += 4;
  }

  // Column pass: same butterfly over the row-pass output, writing results
  // directly to the destination (no predictor add, no shift).
  ip = output;
  for (i = 0; i < 4; i++) {
    a1 = ip[4 * 0];
    c1 = ip[4 * 1];
    d1 = ip[4 * 2];
    b1 = ip[4 * 3];
    a1 += c1;
    d1 -= b1;
    e1 = (a1 - d1) >> 1;
    b1 = e1 - b1;
    c1 = e1 - c1;
    a1 -= b1;
    d1 += c1;

    // Lossless WHT output must fit in bd+1 bits (debug-build check).
    range_check_value(a1, bd + 1);
    range_check_value(b1, bd + 1);
    range_check_value(c1, bd + 1);
    range_check_value(d1, bd + 1);

    dst[stride * 0] = a1;
    dst[stride * 1] = b1;
    dst[stride * 2] = c1;
    dst[stride * 3] = d1;

    ip++;
    dst++;
  }
}
| |
| |
| |
| void inv_txfm2d_c(const int32_t *input, uint16_t *output, int stride, TXFM_2D_FLIP_CFG *cfg, int32_t *txfm_buf, |
| TX_SIZE tx_size, int bd); |
| |
| static INLINE void inv_txfm2d_facade(const int32_t *input, uint16_t *output, int stride, int32_t *txfm_buf, |
| TX_TYPE tx_type, TX_SIZE tx_size, int bd) { |
| TXFM_2D_FLIP_CFG cfg; |
| av1_get_inv_txfm_cfg(tx_type, tx_size, &cfg); |
| inv_txfm2d_c(input, output, stride, &cfg, txfm_buf, tx_size, bd); |
| } |
| |
| |
| void av1_highbd_inv_txfm_c(const tran_low_t *input, uint16_t *dest, int stride, const TxfmParam *txfm_param) { |
| assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); |
| const TX_SIZE tx_size = txfm_param->tx_size; |
| |
| const int bd = txfm_param->bd; |
| const TX_TYPE tx_type = txfm_param->tx_type; |
| const int32_t *input32 = cast_to_int32(input); |
| DECLARE_ALIGNED(32, int, txfm_buf[64 * 64 + 64 + 64]); |
| |
| int32_t mod_input[64 * 64]; |
| const int block_w = tx_size_wide[tx_size]; |
| const int block_h = tx_size_high[tx_size]; |
| if (block_w == 64 || block_h == 64) |
| { |
| memset(mod_input, 0, sizeof(mod_input)); |
| int w = block_w > 32 ? 32 : block_w; |
| int h = block_h > 32 ? 32 : block_h; |
| for (int row = 0; row < h; ++row) { |
| memcpy(mod_input + row * block_w, input32 + row * w, w * sizeof(*mod_input)); |
| } |
| input32 = mod_input; |
| } |
| // if (tx_size == TX_32X64 || tx_size == TX_16X64) |
| // { |
| // int w = tx_size == TX_32X64 ? 32 : 16; |
| // for (int row = 0; row < ) |
| // memset(input32 + w * 32, 0, (64 - w) * sizeof(*input32)); |
| // } |
| // if (tx_size == TX_64X64 || tx_size == TX_64X32 || tx_size == TX_64X16) |
| // { |
| // int h = tx_size == TX_64X16 ? 16 : 32; |
| // for (int row = 0; row < h; ++row) { |
| // memcpy(mod_input + row * 64, input32 + row * 32, 32 * sizeof(*mod_input)); |
| // memset(mod_input + row * 64 + 32, 0, 32 * sizeof(*mod_input)); |
| // } |
| // if (tx_size == TX_64X64) |
| // memset(mod_input + 32 * 64, 0, 32 * 64 * sizeof(*mod_input)); |
| // input32 = mod_input; |
| // } |
| |
| if (!txfm_param->lossless || tx_size != TX_4X4) |
| { |
| inv_txfm2d_facade(input32, dest, stride, txfm_buf, tx_type, tx_size, bd); |
| } |
| else |
| { |
| av1_highbd_iwht4x4_16_c(input32, (int16_t *)dest, stride, bd); |
| } |
| } |
| |
| void av1_inverse_transform_block_frame(const MACROBLOCKD *xd, const tran_low_t *dqcoeff, int plane, TX_TYPE tx_type, |
| TX_SIZE tx_size, uint16_t *dst, int stride, int eob, int reduced_tx_set) |
| { |
| if (!eob) return; |
| assert(eob <= av1_get_max_eob(tx_size)); |
| |
| TxfmParam txfm_param; |
| init_txfm_param(xd, plane, tx_size, tx_type, eob, reduced_tx_set, &txfm_param); |
| assert(av1_ext_tx_used[txfm_param.tx_set_type][txfm_param.tx_type]); |
| |
| av1_highbd_inv_txfm_c(dqcoeff, dst, stride, &txfm_param); |
| // if (txfm_param.is_hbd) { |
| // av1_highbd_inv_txfm_add(dqcoeff, dst, stride, &txfm_param); |
| // } else { |
| // //av1_inv_txfm_add(dqcoeff, dst, stride, &txfm_param); |
| // const TX_SIZE tx_size = txfm_param.tx_size; |
| // DECLARE_ALIGNED(32, uint16_t, tmp[MAX_TX_SQUARE]); |
| // int tmp_stride = MAX_TX_SIZE; |
| // int w = tx_size_wide[tx_size]; |
| // int h = tx_size_high[tx_size]; |
| // for (int r = 0; r < h; ++r) { |
| // for (int c = 0; c < w; ++c) { |
| // tmp[r * tmp_stride + c] = dst[r * stride + c]; |
| // } |
| // } |
| // |
| // av1_highbd_inv_txfm_add(dqcoeff, CONVERT_TO_BYTEPTR(tmp), tmp_stride, &txfm_param); |
| // |
| // for (int r = 0; r < h; ++r) { |
| // for (int c = 0; c < w; ++c) { |
| // dst[r * stride + c] = (uint8_t)tmp[r * tmp_stride + c]; |
| // } |
| // } |
| // } |
| } |