blob: 91040db33b2c5fa7a3ac9186880a328710ed38eb [file] [log] [blame]
Yaowu Xuc27fc142016-08-22 16:08:15 -07001/*
Yaowu Xu2ab7ff02016-09-02 12:04:54 -07002 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
Yaowu Xuc27fc142016-08-22 16:08:15 -07003 *
Yaowu Xu2ab7ff02016-09-02 12:04:54 -07004 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
Yaowu Xuc27fc142016-08-22 16:08:15 -070010 */
11
Yaowu Xuf883b422016-08-30 14:01:10 -070012#include "./av1_rtcd.h"
13#include "./aom_config.h"
14#include "./aom_dsp_rtcd.h"
Yaowu Xuc27fc142016-08-22 16:08:15 -070015
16#include "aom_dsp/quantize.h"
Yaowu Xuf883b422016-08-30 14:01:10 -070017#include "aom_mem/aom_mem.h"
Yaowu Xuc27fc142016-08-22 16:08:15 -070018#include "aom_ports/mem.h"
19
20#include "av1/common/idct.h"
21#include "av1/common/reconinter.h"
22#include "av1/common/reconintra.h"
23#include "av1/common/scan.h"
24
25#include "av1/encoder/encodemb.h"
26#include "av1/encoder/hybrid_fwd_txfm.h"
27#include "av1/encoder/quantize.h"
28#include "av1/encoder/rd.h"
29#include "av1/encoder/tokenize.h"
30
Yushin Cho77bba8d2016-11-04 16:36:56 -070031#if CONFIG_PVQ
32#include "av1/encoder/encint.h"
33#include "av1/common/partition.h"
34#include "av1/encoder/pvq_encoder.h"
35#endif
36
Yaowu Xuf883b422016-08-30 14:01:10 -070037void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
Yaowu Xuc27fc142016-08-22 16:08:15 -070038 struct macroblock_plane *const p = &x->plane[plane];
39 const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
40 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
41 const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
42 const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
43
Yaowu Xuf883b422016-08-30 14:01:10 -070044#if CONFIG_AOM_HIGHBITDEPTH
Yaowu Xuc27fc142016-08-22 16:08:15 -070045 if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
Yaowu Xuf883b422016-08-30 14:01:10 -070046 aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
Yaowu Xuc27fc142016-08-22 16:08:15 -070047 p->src.stride, pd->dst.buf, pd->dst.stride,
48 x->e_mbd.bd);
49 return;
50 }
Yaowu Xuf883b422016-08-30 14:01:10 -070051#endif // CONFIG_AOM_HIGHBITDEPTH
52 aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
Yaowu Xuc27fc142016-08-22 16:08:15 -070053 pd->dst.buf, pd->dst.stride);
54}
55
// Per-coefficient trellis node used by av1_optimize_b(). Each scan position
// keeps two candidate states (index [i][0] and [i][1]) describing the cost of
// coding from that position to the end of block.
typedef struct av1_token_state {
  int rate;        // cumulative token rate from this node onward
  int64_t error;   // cumulative squared dequantization error (distortion)
  int next;        // scan index of the successor node on this path
  int16_t token;   // entropy token chosen at this position
  tran_low_t qc;   // candidate quantized coefficient value
  tran_low_t dqc;  // matching dequantized coefficient value
} av1_token_state;
Yaowu Xuc27fc142016-08-22 16:08:15 -070064
// These numbers are empirically obtained.
// Rate-distortion multiplier scale factors, indexed as
// [is_inter_block][plane type] (see usage in av1_optimize_b: row 0 = intra,
// row 1 = inter; column 0 = luma, column 1 = chroma).
static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = {
  { 10, 6 }, { 8, 5 },
};
69
// Recomputes the RD cost of both candidate trellis paths from the current
// (rate, error) pairs. NOTE: this macro reads rdmult, rddiv, rate0/rate1,
// error0/error1 and writes rd_cost0/rd_cost1 — all must be in scope at the
// expansion site (it is only used inside av1_optimize_b).
#define UPDATE_RD_COST() \
  { \
    rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \
    rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \
  }
75
// Trellis (Viterbi) optimization of the quantized coefficients of one
// transform block. Walking the scan order backwards from the current EOB,
// each nonzero coefficient is given two candidate states: keep the quantized
// value as-is, or reduce its magnitude by one (the `x -= 2 * sz + 1` step).
// The rate/distortion of every path to the end of block is accumulated and
// the cheapest path is written back into qcoeff[]/dqcoeff[].
// Returns the new end-of-block position (also stored in p->eobs[block]).
// `ctx` is the entropy context used to cost the first token.
int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
                   TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  struct macroblock_plane *const p = &mb->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ref = is_inter_block(&xd->mi[0]->mbmi);
  // tokens[i][0/1]: the two candidate trellis states per scan position;
  // index eob holds the sentinel (EOB) node.
  av1_token_state tokens[MAX_TX_SQUARE + 1][2];
  unsigned best_index[MAX_TX_SQUARE + 1][2];
  uint8_t token_cache[MAX_TX_SQUARE];
  const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  const int eob = p->eobs[block];
  const PLANE_TYPE plane_type = pd->plane_type;
  const int default_eob = tx_size_2d[tx_size];
  const int16_t *const dequant_ptr = pd->dequant;
  const uint8_t *const band_translate = get_band_translate(tx_size);
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  const SCAN_ORDER *const scan_order =
      get_scan(cm, tx_size, tx_type, is_inter_block(&xd->mi[0]->mbmi));
  const int16_t *const scan = scan_order->scan;
  const int16_t *const nb = scan_order->neighbors;
#if CONFIG_AOM_QM
  int seg_id = xd->mi[0]->mbmi.segment_id;
  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][!ref][tx_size];
#endif
  const int shift = get_tx_scale(xd, tx_type, tx_size);
#if CONFIG_NEW_QUANT
  int dq = get_dq_profile_from_ctx(mb->qindex, ctx, ref, plane_type);
  const dequant_val_type_nuq *dequant_val = pd->dequant_val_nuq[dq];
#else
  // Quantization step per coefficient class (DC / AC), scaled to match the
  // transform's down-shift.
  const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift };
#endif  // CONFIG_NEW_QUANT
  int next = eob, sz = 0;
  const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][plane_type]) >> 1;
  const int64_t rddiv = mb->rddiv;
  int64_t rd_cost0, rd_cost1;
  int rate0, rate1;
  int64_t error0, error1;
  int16_t t0, t1;
  int best, band = (eob < default_eob) ? band_translate[eob]
                                       : band_translate[eob - 1];
  int pt, i, final_eob;
#if CONFIG_AOM_HIGHBITDEPTH
  const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
#else
  const int *cat6_high_cost = av1_get_high_cost_table(8);
#endif
  unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
      mb->token_costs[txsize_sqr_map[tx_size]][plane_type][ref];
  const uint16_t *band_counts = &band_count_table[tx_size][band];
  uint16_t band_left = eob - band_cum_count_table[tx_size][band] + 1;
  int shortcut = 0;
  int next_shortcut = 0;

  assert((mb->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));

  token_costs += band;

  // plane 0 must be PLANE_TYPE_Y (0) and nonzero planes PLANE_TYPE_UV.
  assert((!plane_type && !plane) || (plane_type && plane));
  assert(eob <= default_eob);

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = EOB_TOKEN;
  tokens[eob][0].qc = 0;
  tokens[eob][1] = tokens[eob][0];

  // Prime token_cache and state 0 with the tokens of the unmodified
  // quantized coefficients.
  for (i = 0; i < eob; i++) {
    const int rc = scan[i];
    tokens[i][0].rate = av1_get_token_cost(qcoeff[rc], &t0, cat6_high_cost);
    tokens[i][0].token = t0;
    token_cache[rc] = av1_pt_energy_class[t0];
  }

  // Dynamic-programming pass: walk the scan order backwards.
  for (i = eob; i-- > 0;) {
    int base_bits, dx;
    int64_t d2;
    const int rc = scan[i];
#if CONFIG_AOM_QM
    int iwt = iqmatrix[rc];
#endif
    int x = qcoeff[rc];
    next_shortcut = shortcut;

    /* Only add a trellis state for non-zero coefficients. */
    if (UNLIKELY(x)) {
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      if (next_shortcut) {
        /* Consider both possible successor states. */
        if (next < default_eob) {
          pt = get_coef_context(nb, token_cache, i + 1);
          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
          rate1 += (*token_costs)[0][pt][tokens[next][1].token];
        }
        UPDATE_RD_COST();
        /* And pick the best. */
        best = rd_cost1 < rd_cost0;
      } else {
        if (next < default_eob) {
          pt = get_coef_context(nb, token_cache, i + 1);
          rate0 += (*token_costs)[0][pt][tokens[next][0].token];
        }
        best = 0;
      }

      // Distortion of keeping the quantized value: dequant minus original,
      // in the transform's scaled domain.
      dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift);
#if CONFIG_AOM_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dx >>= xd->bd - 8;
      }
#endif  // CONFIG_AOM_HIGHBITDEPTH
      d2 = (int64_t)dx * dx;
      tokens[i][0].rate += (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].qc = x;
      tokens[i][0].dqc = dqcoeff[rc];
      best_index[i][0] = best;

      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;

      // Only bother evaluating the magnitude-minus-one alternative when the
      // original value might round down (|x| small and the dequantized value
      // straddles the original coefficient).
      // The threshold of 3 is empirically obtained.
      if (UNLIKELY(abs(x) > 3)) {
        shortcut = 0;
      } else {
#if CONFIG_NEW_QUANT
        shortcut = ((av1_dequant_abscoeff_nuq(abs(x), dequant_ptr[rc != 0],
                                              dequant_val[band_translate[i]]) >
                     (abs(coeff[rc]) << shift)) &&
                    (av1_dequant_abscoeff_nuq(abs(x) - 1, dequant_ptr[rc != 0],
                                              dequant_val[band_translate[i]]) <
                     (abs(coeff[rc]) << shift)));
#else  // CONFIG_NEW_QUANT
#if CONFIG_AOM_QM
        if ((abs(x) * dequant_ptr[rc != 0] * iwt >
             ((abs(coeff[rc]) << shift) << AOM_QM_BITS)) &&
            (abs(x) * dequant_ptr[rc != 0] * iwt <
             (((abs(coeff[rc]) << shift) + dequant_ptr[rc != 0])
              << AOM_QM_BITS)))
#else
        if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) &&
            (abs(x) * dequant_ptr[rc != 0] <
             (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0]))
#endif  // CONFIG_AOM_QM
          shortcut = 1;
        else
          shortcut = 0;
#endif  // CONFIG_NEW_QUANT
      }

      if (shortcut) {
        // Reduce |x| by one, preserving sign (sz is 0 or -1).
        sz = -(x < 0);
        x -= 2 * sz + 1;
      } else {
        // No alternative worth evaluating: duplicate state 0 into state 1
        // and advance the trellis head.
        tokens[i][1] = tokens[i][0];
        best_index[i][1] = best_index[i][0];
        next = i;

        if (UNLIKELY(!(--band_left))) {
          --band_counts;
          band_left = *band_counts;
          --token_costs;
        }
        continue;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
        base_bits = 0;
      } else {
        base_bits = av1_get_token_cost(x, &t0, cat6_high_cost);
        t1 = t0;
      }

      if (next_shortcut) {
        if (LIKELY(next < default_eob)) {
          if (t0 != EOB_TOKEN) {
            token_cache[rc] = av1_pt_energy_class[t0];
            pt = get_coef_context(nb, token_cache, i + 1);
            rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
          }
          if (t1 != EOB_TOKEN) {
            token_cache[rc] = av1_pt_energy_class[t1];
            pt = get_coef_context(nb, token_cache, i + 1);
            rate1 += (*token_costs)[!x][pt][tokens[next][1].token];
          }
        }

        UPDATE_RD_COST();
        /* And pick the best. */
        best = rd_cost1 < rd_cost0;
      } else {
        // The two states in next stage are identical.
        if (next < default_eob && t0 != EOB_TOKEN) {
          token_cache[rc] = av1_pt_energy_class[t0];
          pt = get_coef_context(nb, token_cache, i + 1);
          rate0 += (*token_costs)[!x][pt][tokens[next][0].token];
        }
        best = 0;
      }

      // Distortion of the magnitude-minus-one alternative.
#if CONFIG_NEW_QUANT
      dx = av1_dequant_coeff_nuq(x, dequant_ptr[rc != 0],
                                 dequant_val[band_translate[i]]) -
           (coeff[rc] << shift);
#if CONFIG_AOM_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dx >>= xd->bd - 8;
      }
#endif  // CONFIG_AOM_HIGHBITDEPTH
#else   // CONFIG_NEW_QUANT
#if CONFIG_AOM_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
      } else {
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
      }
#else
      dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
#endif  // CONFIG_AOM_HIGHBITDEPTH
#endif  // CONFIG_NEW_QUANT
      d2 = (int64_t)dx * dx;

      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;

      if (x) {
#if CONFIG_NEW_QUANT
        tokens[i][1].dqc = av1_dequant_abscoeff_nuq(
            abs(x), dequant_ptr[rc != 0], dequant_val[band_translate[i]]);
        tokens[i][1].dqc = shift ? ROUND_POWER_OF_TWO(tokens[i][1].dqc, shift)
                                 : tokens[i][1].dqc;
        if (sz) tokens[i][1].dqc = -tokens[i][1].dqc;
#else
        tran_low_t offset = dq_step[rc != 0];
        // The 32x32 transform coefficient uses half quantization step size.
        // Account for the rounding difference in the dequantized coefficient
        // value when the quantization index is dropped from an even number
        // to an odd number.
        if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01);

        if (sz == 0)
          tokens[i][1].dqc = dqcoeff[rc] - offset;
        else
          tokens[i][1].dqc = dqcoeff[rc] + offset;
#endif  // CONFIG_NEW_QUANT
      } else {
        tokens[i][1].dqc = 0;
      }

      best_index[i][1] = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    } else {
      /* There's no choice to make for a zero coefficient, so we don't
       * add a new trellis node, but we do need to update the costs.
       */
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      pt = get_coef_context(nb, token_cache, i + 1);
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != EOB_TOKEN) {
        tokens[next][0].rate += (*token_costs)[1][pt][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != EOB_TOKEN) {
        tokens[next][1].rate += (*token_costs)[1][pt][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      best_index[i][0] = best_index[i][1] = 0;
      shortcut = (tokens[next][0].rate != tokens[next][1].rate);
      /* Don't update next, because we didn't add a new node. */
    }

    // Track entropy-band boundaries so (*token_costs) always points at the
    // cost table for the current band.
    if (UNLIKELY(!(--band_left))) {
      --band_counts;
      band_left = *band_counts;
      --token_costs;
    }
  }

  /* Now pick the best path through the whole trellis. */
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += (*token_costs)[0][ctx][t0];
  rate1 += (*token_costs)[0][ctx][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;

  final_eob = -1;

  // Walk the winning path forward, committing each chosen (qc, dqc) pair.
  for (i = next; i < eob; i = next) {
    const int x = tokens[i][best].qc;
    const int rc = scan[i];
    if (x) final_eob = i;
    qcoeff[rc] = x;
    dqcoeff[rc] = tokens[i][best].dqc;

    next = tokens[i][best].next;
    best = best_index[i][best];
  }
  final_eob++;

  mb->plane[plane].eobs[block] = final_eob;
  assert(final_eob <= default_eob);
  return final_eob;
}
406
// Quantizer dispatch table: one row per AV1_XFORM_QUANT_* mode (fp, b, dc,
// skip), with a low-bit-depth and — when built with high-bit-depth support —
// a high-bit-depth facade per row. The NULL row corresponds to the
// skip-quant mode, which is filtered out before dispatch in av1_xform_quant.
// The table is omitted entirely for PVQ builds, which do not use these
// facades.
#if CONFIG_AOM_HIGHBITDEPTH
typedef enum QUANT_FUNC {
  QUANT_FUNC_LOWBD = 0,
  QUANT_FUNC_HIGHBD = 1,
  QUANT_FUNC_LAST = 2
} QUANT_FUNC;

static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST][QUANT_FUNC_LAST] =
    { { av1_quantize_fp_facade, av1_highbd_quantize_fp_facade },
      { av1_quantize_b_facade, av1_highbd_quantize_b_facade },
      { av1_quantize_dc_facade, av1_highbd_quantize_dc_facade },
      { NULL, NULL } };

#elif !CONFIG_PVQ

typedef enum QUANT_FUNC {
  QUANT_FUNC_LOWBD = 0,
  QUANT_FUNC_LAST = 1
} QUANT_FUNC;

static AV1_QUANT_FACADE quant_func_list[AV1_XFORM_QUANT_LAST]
                                       [QUANT_FUNC_LAST] = {
                                         { av1_quantize_fp_facade },
                                         { av1_quantize_b_facade },
                                         { av1_quantize_dc_facade },
                                         { NULL }
                                       };
#endif
435
// Forward-transform optimization level per AV1_XFORM_QUANT_* mode, indexed
// in parallel with quant_func_list above (fp, b, dc, skip); the third entry
// pairs the DC-only quantizer with a DC-only forward transform.
static FWD_TXFM_OPT fwd_txfm_opt_list[AV1_XFORM_QUANT_LAST] = {
  FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_NORMAL, FWD_TXFM_OPT_DC, FWD_TXFM_OPT_NORMAL
};
439
// Forward-transforms the residual of one transform block and quantizes the
// result into qcoeff/dqcoeff, updating p->eobs[block]. The quantizer variant
// (fp / b / dc / skip-quant) is selected by xform_quant_idx via
// quant_func_list. In PVQ builds the scalar-quantization path is replaced
// entirely: the source and the prediction are each forward-transformed and
// encoded with av1_pvq_encode_helper.
void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
                     int blk_row, int blk_col, BLOCK_SIZE plane_bsize,
                     TX_SIZE tx_size, AV1_XFORM_QUANT xform_quant_idx) {
  MACROBLOCKD *const xd = &x->e_mbd;
#if !CONFIG_PVQ
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
#else
  // PVQ writes into p->src_int16 and pd->pred below, so no const here.
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
#endif
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
  const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
#if CONFIG_AOM_QM
  int seg_id = xd->mi[0]->mbmi.segment_id;
  const qm_val_t *qmatrix = pd->seg_qmatrix[seg_id][!is_inter][tx_size];
  const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][!is_inter][tx_size];
#endif

  FWD_TXFM_PARAM fwd_txfm_param;

#if !CONFIG_PVQ
  const int tx2d_size = tx_size_2d[tx_size];
  QUANT_PARAM qparam;
  const int16_t *src_diff;

  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
  qparam.log_scale = get_tx_scale(xd, tx_type, tx_size);
#else
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  tran_low_t *ref_coeff = BLOCK_OFFSET(pd->pvq_ref_coeff, block);
  uint8_t *src, *dst;
  int16_t *src_int16, *pred;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int tx_blk_size;
  int i, j;
  int skip = 1;
  PVQ_INFO *pvq_info = NULL;

  (void)scan_order;
  (void)qcoeff;

  if (x->pvq_coded) {
    assert(block < MAX_PVQ_BLOCKS_IN_SB);
    pvq_info = &x->pvq[block][plane];
  }
  dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
  src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
  src_int16 = &p->src_int16[4 * (blk_row * diff_stride + blk_col)];
  pred = &pd->pred[4 * (blk_row * diff_stride + blk_col)];

  // transform block size in pixels
  tx_blk_size = tx_size_wide[tx_size];

  // copy uint8 orig and predicted block to int16 buffer
  // in order to use existing VP10 transform functions
  for (j = 0; j < tx_blk_size; j++)
    for (i = 0; i < tx_blk_size; i++) {
      src_int16[diff_stride * j + i] = src[src_stride * j + i];
      pred[diff_stride * j + i] = dst[dst_stride * j + i];
    }
#endif

  fwd_txfm_param.tx_type = tx_type;
  fwd_txfm_param.tx_size = tx_size;
  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[xform_quant_idx];
  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];

#if CONFIG_AOM_HIGHBITDEPTH
  fwd_txfm_param.bd = xd->bd;
  // High-bit-depth path: transform + quantize, then return early.
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
    if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
      if (LIKELY(!x->skip_block)) {
        quant_func_list[xform_quant_idx][QUANT_FUNC_HIGHBD](
            coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
#if CONFIG_AOM_QM
            ,
            qmatrix, iqmatrix
#endif  // CONFIG_AOM_QM
            );
      } else {
        av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
      }
    }
    return;
  }
#endif  // CONFIG_AOM_HIGHBITDEPTH

#if !CONFIG_PVQ
  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
  if (xform_quant_idx != AV1_XFORM_QUANT_SKIP_QUANT) {
    if (LIKELY(!x->skip_block)) {
      quant_func_list[xform_quant_idx][QUANT_FUNC_LOWBD](
          coeff, tx2d_size, p, qcoeff, pd, dqcoeff, eob, scan_order, &qparam
#if CONFIG_AOM_QM
          ,
          qmatrix, iqmatrix
#endif  // CONFIG_AOM_QM
          );
    } else {
      av1_quantize_skip(tx2d_size, qcoeff, dqcoeff, eob);
    }
  }
#else   // #if !CONFIG_PVQ
  fwd_txfm_param.rd_transform = 0;

  fwd_txfm(src_int16, coeff, diff_stride, &fwd_txfm_param);
  fwd_txfm(pred, ref_coeff, diff_stride, &fwd_txfm_param);

  // PVQ for inter mode block
  if (!x->skip_block)
    skip = av1_pvq_encode_helper(&x->daala_enc,
                                 coeff,      // target original vector
                                 ref_coeff,  // reference vector
                                 dqcoeff,    // de-quantized vector
                                 eob,        // End of Block marker
                                 pd->dequant,  // aom's quantizers
                                 plane,      // image plane
                                 tx_size,    // block size in log_2 - 2
                                 tx_type,
                                 &x->rate,   // rate measured
                                 x->pvq_speed,
                                 pvq_info);  // PVQ info for a block

  x->pvq_skip[plane] = skip;

  if (!skip) mbmi->skip = 0;
#endif  // #if !CONFIG_PVQ
}
579
580#if CONFIG_NEW_QUANT
Angie Chiangff6d8902016-10-21 11:02:09 -0700581void av1_xform_quant_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
582 int block, int blk_row, int blk_col,
583 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx) {
Yaowu Xuc27fc142016-08-22 16:08:15 -0700584 MACROBLOCKD *const xd = &x->e_mbd;
585 const struct macroblock_plane *const p = &x->plane[plane];
586 const struct macroblockd_plane *const pd = &xd->plane[plane];
587 PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
588 TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
Debargha Mukherjee9324d382016-09-23 10:52:13 -0700589 const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
Angie Chiangff6d8902016-10-21 11:02:09 -0700590 const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700591 tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
592 tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
593 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
David Barkerd7d78c82016-10-24 10:55:35 +0100594 int dq = get_dq_profile_from_ctx(x->qindex, ctx, is_inter, plane_type);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700595 uint16_t *const eob = &p->eobs[block];
596 const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
597 const int16_t *src_diff;
598 const uint8_t *band = get_band_translate(tx_size);
599
600 FWD_TXFM_PARAM fwd_txfm_param;
601
David Barkerd7d78c82016-10-24 10:55:35 +0100602 assert((x->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));
Debargha Mukherjee3c42c092016-09-29 09:17:36 -0700603
Yaowu Xuc27fc142016-08-22 16:08:15 -0700604 fwd_txfm_param.tx_type = tx_type;
605 fwd_txfm_param.tx_size = tx_size;
Yaowu Xuf883b422016-08-30 14:01:10 -0700606 fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
Yaowu Xuc27fc142016-08-22 16:08:15 -0700607 fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
608 fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
609
610 src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
611
612// TODO(sarahparker) add all of these new quant quantize functions
613// to quant_func_list, just trying to get this expr to work for now
Yaowu Xuf883b422016-08-30 14:01:10 -0700614#if CONFIG_AOM_HIGHBITDEPTH
Yaowu Xuc27fc142016-08-22 16:08:15 -0700615 fwd_txfm_param.bd = xd->bd;
616 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
617 highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
618 if (tx_size == TX_32X32) {
619 highbd_quantize_32x32_nuq(
Jingning Han7e992972016-10-31 11:03:06 -0700620 coeff, tx_size_2d[tx_size], x->skip_block, p->quant, p->quant_shift,
621 pd->dequant, (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
Yaowu Xuc27fc142016-08-22 16:08:15 -0700622 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
623 dqcoeff, eob, scan_order->scan, band);
624 } else {
Jingning Han7e992972016-10-31 11:03:06 -0700625 highbd_quantize_nuq(coeff, tx_size_2d[tx_size], x->skip_block, p->quant,
626 p->quant_shift, pd->dequant,
Yaowu Xuc27fc142016-08-22 16:08:15 -0700627 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
628 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
629 qcoeff, dqcoeff, eob, scan_order->scan, band);
630 }
631 return;
632 }
Yaowu Xuf883b422016-08-30 14:01:10 -0700633#endif // CONFIG_AOM_HIGHBITDEPTH
Yaowu Xuc27fc142016-08-22 16:08:15 -0700634
635 fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
636 if (tx_size == TX_32X32) {
637 quantize_32x32_nuq(coeff, 1024, x->skip_block, p->quant, p->quant_shift,
638 pd->dequant,
639 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
640 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
641 qcoeff, dqcoeff, eob, scan_order->scan, band);
642 } else {
Jingning Han7e992972016-10-31 11:03:06 -0700643 quantize_nuq(coeff, tx_size_2d[tx_size], x->skip_block, p->quant,
Yaowu Xuc27fc142016-08-22 16:08:15 -0700644 p->quant_shift, pd->dequant,
645 (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
646 (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
647 dqcoeff, eob, scan_order->scan, band);
648 }
649}
650
// Forward-transforms one block's residual and quantizes it with the fast
// "fp"-style non-uniform (NUQ) quantizer (p->quant_fp, no quant_shift),
// selecting the dequant profile from the entropy context. Writes
// qcoeff/dqcoeff and updates p->eobs[block].
void av1_xform_quant_fp_nuq(const AV1_COMMON *cm, MACROBLOCK *x, int plane,
                            int block, int blk_row, int blk_col,
                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, is_inter);
  // Dequantization profile chosen from the coefficient context.
  int dq = get_dq_profile_from_ctx(x->qindex, ctx, is_inter, plane_type);
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  const uint8_t *band = get_band_translate(tx_size);

  FWD_TXFM_PARAM fwd_txfm_param;

  // Lossless mode must coincide exactly with qindex == 0.
  assert((x->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));

  fwd_txfm_param.tx_type = tx_type;
  fwd_txfm_param.tx_size = tx_size;
  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_FP];
  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];

  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];

// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
#if CONFIG_AOM_HIGHBITDEPTH
  fwd_txfm_param.bd = xd->bd;
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
    if (tx_size == TX_32X32) {
      highbd_quantize_32x32_fp_nuq(
          coeff, tx_size_2d[tx_size], x->skip_block, p->quant_fp, pd->dequant,
          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
          dqcoeff, eob, scan_order->scan, band);
    } else {
      highbd_quantize_fp_nuq(
          coeff, tx_size_2d[tx_size], x->skip_block, p->quant_fp, pd->dequant,
          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq], qcoeff,
          dqcoeff, eob, scan_order->scan, band);
    }
    return;
  }
#endif  // CONFIG_AOM_HIGHBITDEPTH

  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
  if (tx_size == TX_32X32) {
    quantize_32x32_fp_nuq(coeff, tx_size_2d[tx_size], x->skip_block,
                          p->quant_fp, pd->dequant,
                          (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
                          (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
                          qcoeff, dqcoeff, eob, scan_order->scan, band);
  } else {
    quantize_fp_nuq(coeff, tx_size_2d[tx_size], x->skip_block, p->quant_fp,
                    pd->dequant,
                    (const cuml_bins_type_nuq *)p->cuml_bins_nuq[dq],
                    (const dequant_val_type_nuq *)pd->dequant_val_nuq[dq],
                    qcoeff, dqcoeff, eob, scan_order->scan, band);
  }
}
720
/*
 * Forward-transforms the residual of one transform block and quantizes
 * only its DC coefficient using the non-uniform quantizer (NUQ) tables.
 * Every quantizer-table access below uses index [0] (p->quant[0],
 * p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0], ...), and
 * the work is delegated to the quantize_dc_* helpers.
 *
 * x           : encoder macroblock context (residual, quantizers, eob array)
 * plane       : 0 selects luma (PLANE_TYPE_Y), otherwise chroma
 * block       : transform-block index within the plane (coefficient offset)
 * blk_row/col : block position in 4x4 units within the plane block
 * plane_bsize : plane partition size, used to derive the residual stride
 * tx_size     : transform size of this block
 * ctx         : entropy context, used to pick the dequant profile `dq`
 */
void av1_xform_quant_dc_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
                            int blk_col, BLOCK_SIZE plane_bsize,
                            TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  // Residual buffer stride in pixels (plane width in 4x4 units * 4).
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
  // Dequant profile selected from qindex/context/prediction type.
  int dq = get_dq_profile_from_ctx(x->qindex, ctx, is_inter, plane_type);

  FWD_TXFM_PARAM fwd_txfm_param;

  // Lossless segments must have qindex 0 and vice versa.
  assert((x->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));

  fwd_txfm_param.tx_type = tx_type;
  fwd_txfm_param.tx_size = tx_size;
  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];

  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];

// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
#if CONFIG_AOM_HIGHBITDEPTH
  fwd_txfm_param.bd = xd->bd;
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
    // 32x32 uses a dedicated quantizer variant (different rounding/shift).
    if (tx_size == TX_32X32) {
      highbd_quantize_dc_32x32_nuq(
          coeff, tx_size_2d[tx_size], x->skip_block, p->quant[0],
          p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
          pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
    } else {
      highbd_quantize_dc_nuq(coeff, tx_size_2d[tx_size], x->skip_block,
                             p->quant[0], p->quant_shift[0], pd->dequant[0],
                             p->cuml_bins_nuq[dq][0],
                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
    }
    return;  // High-bitdepth path fully handled above.
  }
#endif  // CONFIG_AOM_HIGHBITDEPTH

  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
  if (tx_size == TX_32X32) {
    quantize_dc_32x32_nuq(coeff, tx_size_2d[tx_size], x->skip_block,
                          p->quant[0], p->quant_shift[0], pd->dequant[0],
                          p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
                          qcoeff, dqcoeff, eob);
  } else {
    quantize_dc_nuq(coeff, tx_size_2d[tx_size], x->skip_block, p->quant[0],
                    p->quant_shift[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
                    pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
  }
}
783
/*
 * Same as av1_xform_quant_dc_nuq() but uses the "fast path" (fp) DC
 * quantizers: p->quant_fp[0] instead of p->quant[0]/p->quant_shift[0].
 * Only the DC coefficient is quantized (all table accesses use index [0]);
 * work is delegated to the quantize_dc_*_fp_nuq helpers.
 *
 * Parameters mirror av1_xform_quant_dc_nuq():
 * x           : encoder macroblock context
 * plane       : 0 selects luma, otherwise chroma
 * block       : transform-block index within the plane
 * blk_row/col : block position in 4x4 units
 * plane_bsize : plane partition size (residual stride derivation)
 * tx_size     : transform size
 * ctx         : entropy context for dequant-profile selection
 */
void av1_xform_quant_dc_fp_nuq(MACROBLOCK *x, int plane, int block, int blk_row,
                               int blk_col, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, int ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  const struct macroblock_plane *const p = &x->plane[plane];
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
  tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint16_t *const eob = &p->eobs[block];
  // Residual buffer stride in pixels.
  const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
  const int16_t *src_diff;
  const int is_inter = is_inter_block(&xd->mi[0]->mbmi);
  // Dequant profile selected from qindex/context/prediction type.
  int dq = get_dq_profile_from_ctx(x->qindex, ctx, is_inter, plane_type);

  FWD_TXFM_PARAM fwd_txfm_param;

  // Lossless segments must have qindex 0 and vice versa.
  assert((x->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));

  fwd_txfm_param.tx_type = tx_type;
  fwd_txfm_param.tx_size = tx_size;
  fwd_txfm_param.fwd_txfm_opt = fwd_txfm_opt_list[AV1_XFORM_QUANT_DC];
  fwd_txfm_param.rd_transform = x->use_lp32x32fdct;
  fwd_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];

  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];

// TODO(sarahparker) add all of these new quant quantize functions
// to quant_func_list, just trying to get this expr to work for now
#if CONFIG_AOM_HIGHBITDEPTH
  fwd_txfm_param.bd = xd->bd;
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    highbd_fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
    // 32x32 uses a dedicated quantizer variant (different rounding/shift).
    if (tx_size == TX_32X32) {
      highbd_quantize_dc_32x32_fp_nuq(
          coeff, tx_size_2d[tx_size], x->skip_block, p->quant_fp[0],
          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
          qcoeff, dqcoeff, eob);
    } else {
      highbd_quantize_dc_fp_nuq(
          coeff, tx_size_2d[tx_size], x->skip_block, p->quant_fp[0],
          pd->dequant[0], p->cuml_bins_nuq[dq][0], pd->dequant_val_nuq[dq][0],
          qcoeff, dqcoeff, eob);
    }
    return;  // High-bitdepth path fully handled above.
  }
#endif  // CONFIG_AOM_HIGHBITDEPTH

  fwd_txfm(src_diff, coeff, diff_stride, &fwd_txfm_param);
  if (tx_size == TX_32X32) {
    quantize_dc_32x32_fp_nuq(coeff, tx_size_2d[tx_size], x->skip_block,
                             p->quant_fp[0], pd->dequant[0],
                             p->cuml_bins_nuq[dq][0],
                             pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
  } else {
    quantize_dc_fp_nuq(coeff, tx_size_2d[tx_size], x->skip_block,
                       p->quant_fp[0], pd->dequant[0], p->cuml_bins_nuq[dq][0],
                       pd->dequant_val_nuq[dq][0], qcoeff, dqcoeff, eob);
  }
}
846#endif // CONFIG_NEW_QUANT
847
848static void encode_block(int plane, int block, int blk_row, int blk_col,
849 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
850 struct encode_b_args *const args = arg;
Angie Chiangff6d8902016-10-21 11:02:09 -0700851 AV1_COMMON *cm = args->cm;
Yaowu Xuc27fc142016-08-22 16:08:15 -0700852 MACROBLOCK *const x = args->x;
853 MACROBLOCKD *const xd = &x->e_mbd;
854 int ctx;
855 struct macroblock_plane *const p = &x->plane[plane];
856 struct macroblockd_plane *const pd = &xd->plane[plane];
857 tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
858 uint8_t *dst;
859 ENTROPY_CONTEXT *a, *l;
860 INV_TXFM_PARAM inv_txfm_param;
Yushin Cho77bba8d2016-11-04 16:36:56 -0700861#if CONFIG_PVQ
862 int tx_blk_size;
863 int i, j;
864#endif
Yaowu Xuc27fc142016-08-22 16:08:15 -0700865#if CONFIG_VAR_TX
866 int i;
867 const int bwl = b_width_log2_lookup[plane_bsize];
868#endif
869 dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
870 a = &args->ta[blk_col];
871 l = &args->tl[blk_row];
872#if CONFIG_VAR_TX
873 ctx = get_entropy_context(tx_size, a, l);
874#else
875 ctx = combine_entropy_contexts(*a, *l);
876#endif
877
878#if CONFIG_VAR_TX
Yue Chena1e48dc2016-08-29 17:29:33 -0700879 // Assert not magic number (uninitialized).
Yaowu Xuc27fc142016-08-22 16:08:15 -0700880 assert(x->blk_skip[plane][(blk_row << bwl) + blk_col] != 234);
881
882 if (x->blk_skip[plane][(blk_row << bwl) + blk_col] == 0) {
883#else
884 {
885#endif
886#if CONFIG_NEW_QUANT
Angie Chiangff6d8902016-10-21 11:02:09 -0700887 av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
Yaowu Xuf883b422016-08-30 14:01:10 -0700888 tx_size, ctx);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700889#else
Angie Chiangff6d8902016-10-21 11:02:09 -0700890 av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
Yaowu Xuf883b422016-08-30 14:01:10 -0700891 AV1_XFORM_QUANT_FP);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700892#endif // CONFIG_NEW_QUANT
893 }
894#if CONFIG_VAR_TX
895 else {
896 p->eobs[block] = 0;
897 }
898#endif
Yushin Cho77bba8d2016-11-04 16:36:56 -0700899#if !CONFIG_PVQ
Yaowu Xuc27fc142016-08-22 16:08:15 -0700900 if (p->eobs[block]) {
Angie Chiangff6d8902016-10-21 11:02:09 -0700901 *a = *l = av1_optimize_b(cm, x, plane, block, tx_size, ctx) > 0;
Yaowu Xuc27fc142016-08-22 16:08:15 -0700902 } else {
903 *a = *l = p->eobs[block] > 0;
904 }
905
906#if CONFIG_VAR_TX
Jingning Hande953b92016-10-25 12:35:43 -0700907 for (i = 0; i < tx_size_wide_unit[tx_size]; ++i) a[i] = a[0];
908
909 for (i = 0; i < tx_size_high_unit[tx_size]; ++i) l[i] = l[0];
Yaowu Xuc27fc142016-08-22 16:08:15 -0700910#endif
911
912 if (p->eobs[block]) *(args->skip) = 0;
913
914 if (p->eobs[block] == 0) return;
Yushin Cho77bba8d2016-11-04 16:36:56 -0700915#else
916 (void)ctx;
917 *a = *l = !x->pvq_skip[plane];
918
919 if (!x->pvq_skip[plane]) *(args->skip) = 0;
920
921 if (x->pvq_skip[plane]) return;
922
923 // transform block size in pixels
924 tx_blk_size = tx_size_wide[tx_size];
925
926 // Since av1 does not have separate function which does inverse transform
927 // but av1_inv_txfm_add_*x*() also does addition of predicted image to
928 // inverse transformed image,
929 // pass blank dummy image to av1_inv_txfm_add_*x*(), i.e. set dst as zeros
930 for (j = 0; j < tx_blk_size; j++)
931 for (i = 0; i < tx_blk_size; i++) dst[j * pd->dst.stride + i] = 0;
932#endif
Yaowu Xuc27fc142016-08-22 16:08:15 -0700933
934 // inverse transform parameters
935 inv_txfm_param.tx_type = get_tx_type(pd->plane_type, xd, block, tx_size);
936 inv_txfm_param.tx_size = tx_size;
937 inv_txfm_param.eob = p->eobs[block];
938 inv_txfm_param.lossless = xd->lossless[xd->mi[0]->mbmi.segment_id];
939
Yaowu Xuf883b422016-08-30 14:01:10 -0700940#if CONFIG_AOM_HIGHBITDEPTH
Yaowu Xuc27fc142016-08-22 16:08:15 -0700941 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
942 inv_txfm_param.bd = xd->bd;
943 highbd_inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
944 return;
945 }
Yaowu Xuf883b422016-08-30 14:01:10 -0700946#endif // CONFIG_AOM_HIGHBITDEPTH
Yaowu Xuc27fc142016-08-22 16:08:15 -0700947 inv_txfm_add(dqcoeff, dst, pd->dst.stride, &inv_txfm_param);
948}
949
950#if CONFIG_VAR_TX
951static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
952 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
953 void *arg) {
954 struct encode_b_args *const args = arg;
955 MACROBLOCK *const x = args->x;
956 MACROBLOCKD *const xd = &x->e_mbd;
957 MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
958 const BLOCK_SIZE bsize = txsize_to_bsize[tx_size];
959 const struct macroblockd_plane *const pd = &xd->plane[plane];
960 const int tx_row = blk_row >> (1 - pd->subsampling_y);
961 const int tx_col = blk_col >> (1 - pd->subsampling_x);
962 TX_SIZE plane_tx_size;
Jingning Hanf65b8702016-10-31 12:13:20 -0700963 const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
964 const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700965
966 if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
967
Debargha Mukherjee2f123402016-08-30 17:43:38 -0700968 plane_tx_size =
969 plane ? uv_txsize_lookup[bsize][mbmi->inter_tx_size[tx_row][tx_col]][0][0]
970 : mbmi->inter_tx_size[tx_row][tx_col];
Yaowu Xuc27fc142016-08-22 16:08:15 -0700971
972 if (tx_size == plane_tx_size) {
973 encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
974 } else {
Jingning Hana9336322016-11-02 15:45:07 -0700975 const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
976 // This is the square transform block partition entry point.
977 int bsl = tx_size_wide_unit[sub_txs];
Yaowu Xuc27fc142016-08-22 16:08:15 -0700978 int i;
Yaowu Xuc27fc142016-08-22 16:08:15 -0700979 assert(bsl > 0);
Yaowu Xuc27fc142016-08-22 16:08:15 -0700980#if CONFIG_EXT_TX
981 assert(tx_size < TX_SIZES);
982#endif // CONFIG_EXT_TX
983
984 for (i = 0; i < 4; ++i) {
Jingning Hande953b92016-10-25 12:35:43 -0700985 const int offsetr = blk_row + ((i >> 1) * bsl);
986 const int offsetc = blk_col + ((i & 0x01) * bsl);
Jingning Hande953b92016-10-25 12:35:43 -0700987 int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
Yaowu Xuc27fc142016-08-22 16:08:15 -0700988
989 if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
990
Jingning Han98d6a1f2016-11-03 12:47:47 -0700991 encode_block_inter(plane, block, offsetr, offsetc, plane_bsize, sub_txs,
992 arg);
993 block += step;
Yaowu Xuc27fc142016-08-22 16:08:15 -0700994 }
995 }
996}
997#endif
998
// Argument bundle for encode_block_pass1(): that callback is invoked
// through the av1_foreach_transformed_block_in_plane() visitor interface
// and therefore receives its state via a single void pointer.
typedef struct encode_block_pass1_args {
  AV1_COMMON *cm;
  MACROBLOCK *x;
} encode_block_pass1_args;
1003
/*
 * First-pass per-transform-block encode: forward transform + quantize the
 * residual, then reconstruct with a fixed 4x4 inverse transform
 * (iwht4x4 for lossless segments, idct4x4 otherwise). Unlike
 * encode_block(), no trellis optimization and no entropy-context updates
 * are performed; only 4x4 inverse transforms are invoked here, so this
 * path presumably runs on 4x4 blocks only — TODO confirm at call sites.
 *
 * Visitor callback: `arg` is an encode_block_pass1_args *.
 */
static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
                               BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                               void *arg) {
  encode_block_pass1_args *args = (encode_block_pass1_args *)arg;
  AV1_COMMON *cm = args->cm;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  uint8_t *dst;
#if CONFIG_NEW_QUANT
  int ctx;
#endif  // CONFIG_NEW_QUANT
  dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];

#if CONFIG_NEW_QUANT
  ctx = 0;  // First pass uses a fixed (zero) context for the NUQ profile.
  av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
                         tx_size, ctx);
#else
  av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                  AV1_XFORM_QUANT_B);
#endif  // CONFIG_NEW_QUANT
// The skip condition differs per config: eob for scalar quant, the PVQ
// per-plane skip flag otherwise.
#if !CONFIG_PVQ
  if (p->eobs[block] > 0) {
#else
  if (!x->pvq_skip[plane]) {
#endif
#if CONFIG_PVQ
    {
      int tx_blk_size;
      int i, j;
      // transform block size in pixels
      tx_blk_size = tx_size_wide[tx_size];

      // Since av1 does not have separate function which does inverse transform
      // but av1_inv_txfm_add_*x*() also does addition of predicted image to
      // inverse transformed image,
      // pass blank dummy image to av1_inv_txfm_add_*x*(), i.e. set dst as zeros
      for (j = 0; j < tx_blk_size; j++)
        for (i = 0; i < tx_blk_size; i++) dst[j * pd->dst.stride + i] = 0;
    }
#endif
#if CONFIG_AOM_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      // Lossless segments use the Walsh-Hadamard transform pair.
      if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
        av1_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                               xd->bd);
      } else {
        av1_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                               xd->bd);
      }
      return;
    }
#endif  // CONFIG_AOM_HIGHBITDEPTH
    if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
      av1_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
    } else {
      av1_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
    }
  }
}
1067
Angie Chiangff6d8902016-10-21 11:02:09 -07001068void av1_encode_sby_pass1(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
1069 encode_block_pass1_args args = { cm, x };
Yaowu Xuf883b422016-08-30 14:01:10 -07001070 av1_subtract_plane(x, bsize, 0);
1071 av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
Angie Chiangff6d8902016-10-21 11:02:09 -07001072 encode_block_pass1, &args);
Yaowu Xuc27fc142016-08-22 16:08:15 -07001073}
1074
/*
 * Encodes all planes of a superblock: computes residuals, then forward
 * transform / quantize / reconstruct each transform block via
 * encode_block() (or encode_block_inter() when variable transform sizes
 * are enabled). mbmi->skip starts at 1 and is cleared by the per-block
 * callbacks as soon as any block emits coefficients.
 */
void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  struct optimize_ctx ctx;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct encode_b_args arg = { cm, x, &ctx, &mbmi->skip, NULL, NULL, 1 };
  int plane;

  mbmi->skip = 1;

  if (x->skip) return;

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
#if CONFIG_VAR_TX
    // TODO(jingning): Clean this up.
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
    const int mi_width = block_size_wide[plane_bsize] >> tx_size_wide_log2[0];
    // NOTE(review): height is shifted by tx_size_wide_log2[0] rather than a
    // height-based log2; both are log2(4) for index 0 so the value is the
    // same, but confirm intent if the tables ever diverge.
    const int mi_height = block_size_high[plane_bsize] >> tx_size_wide_log2[0];
    const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
    const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
    // Transform-block dimensions in 4x4 units.
    const int bw = block_size_wide[txb_size] >> tx_size_wide_log2[0];
    const int bh = block_size_high[txb_size] >> tx_size_wide_log2[0];
    int idx, idy;
    int block = 0;
    int step = tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size];
    av1_get_entropy_contexts(bsize, TX_4X4, pd, ctx.ta[plane], ctx.tl[plane]);
#else
    const struct macroblockd_plane *const pd = &xd->plane[plane];
    const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
    av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
#endif
// PVQ computes its own reference/residual inside the block callback, so the
// plane-level subtraction is only needed for the scalar-quantizer path.
#if !CONFIG_PVQ
    av1_subtract_plane(x, bsize, plane);
#endif
    arg.ta = ctx.ta[plane];
    arg.tl = ctx.tl[plane];

#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
    // Rectangular transforms bypass the recursive square partition walk.
    if (is_rect_tx(mbmi->tx_size)) {
      av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                             &arg);
    } else {
#endif
      for (idy = 0; idy < mi_height; idy += bh) {
        for (idx = 0; idx < mi_width; idx += bw) {
          encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
                             &arg);
          block += step;
        }
      }
#if CONFIG_EXT_TX && CONFIG_RECT_TX
    }
#endif
#else
    av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                           &arg);
#endif
  }
}
1135
1136#if CONFIG_SUPERTX
Angie Chiangff6d8902016-10-21 11:02:09 -07001137void av1_encode_sb_supertx(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
Yaowu Xuc27fc142016-08-22 16:08:15 -07001138 MACROBLOCKD *const xd = &x->e_mbd;
1139 struct optimize_ctx ctx;
1140 MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
Angie Chiangff6d8902016-10-21 11:02:09 -07001141 struct encode_b_args arg = { cm, x, &ctx, &mbmi->skip, NULL, NULL, 1 };
Yaowu Xuc27fc142016-08-22 16:08:15 -07001142 int plane;
1143
1144 mbmi->skip = 1;
1145 if (x->skip) return;
1146
1147 for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
1148 const struct macroblockd_plane *const pd = &xd->plane[plane];
1149#if CONFIG_VAR_TX
1150 const TX_SIZE tx_size = TX_4X4;
1151#else
1152 const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
1153#endif
Yaowu Xuf883b422016-08-30 14:01:10 -07001154 av1_subtract_plane(x, bsize, plane);
1155 av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]);
Yaowu Xuc27fc142016-08-22 16:08:15 -07001156 arg.ta = ctx.ta[plane];
1157 arg.tl = ctx.tl[plane];
Yaowu Xuf883b422016-08-30 14:01:10 -07001158 av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
1159 &arg);
Yaowu Xuc27fc142016-08-22 16:08:15 -07001160 }
1161}
1162#endif // CONFIG_SUPERTX
1163
/*
 * Encodes one intra-predicted transform block: runs intra prediction into
 * pd->dst, subtracts it from the source to form the residual, forward
 * transforms and quantizes (scalar quantizer path) or PVQ-encodes
 * (CONFIG_PVQ path), and reconstructs the block in place by adding the
 * inverse transform to the predictor.
 *
 * Visitor callback: `arg` is a struct encode_b_args *. Updates the
 * above/left entropy contexts (non-PVQ) and clears *(args->skip) /
 * mbmi->skip when the block emits coefficients.
 */
void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
                            BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                            void *arg) {
  struct encode_b_args *const args = arg;
#if !CONFIG_PVQ
  AV1_COMMON *cm = args->cm;
#endif
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  struct macroblock_plane *const p = &x->plane[plane];
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  const TX_TYPE tx_type = get_tx_type(plane_type, xd, block, tx_size);
  PREDICTION_MODE mode;
  const int diff_stride = block_size_wide[plane_bsize];
  uint8_t *src, *dst;
  int16_t *src_diff;
  uint16_t *eob = &p->eobs[block];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
#if !CONFIG_PVQ
  ENTROPY_CONTEXT *a = NULL, *l = NULL;
  int ctx;
  INV_TXFM_PARAM inv_txfm_param;
#else
  FWD_TXFM_PARAM fwd_txfm_param;
  tran_low_t *coeff = BLOCK_OFFSET(p->coeff, block);
  // PVQ codes the prediction's transform coefficients as the reference.
  tran_low_t *ref_coeff = BLOCK_OFFSET(pd->pvq_ref_coeff, block);
  int16_t *src_int16;
  int tx_blk_size;
  int i, j;
  int16_t *pred = &pd->pred[4 * (blk_row * diff_stride + blk_col)];
  int skip = 1;
  PVQ_INFO *pvq_info = NULL;
  int seg_id = xd->mi[0]->mbmi.segment_id;

  if (x->pvq_coded) {
    assert(block < MAX_PVQ_BLOCKS_IN_SB);
    pvq_info = &x->pvq[block][plane];
  }
  src_int16 = &p->src_int16[4 * (blk_row * diff_stride + blk_col)];
#endif

  // Only square transforms are handled here.
  assert(tx1d_width == tx1d_height);

  dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
  src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
  src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
  mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
  // Predict into dst (dst also serves as the reference for prediction).
  av1_predict_intra_block(xd, pd->width, pd->height, tx_size, mode, dst,
                          dst_stride, dst, dst_stride, blk_col, blk_row, plane);
#if CONFIG_AOM_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    aom_highbd_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride,
                              src, src_stride, dst, dst_stride, xd->bd);
  } else {
    aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
                       src_stride, dst, dst_stride);
  }
#else
  aom_subtract_block(tx1d_height, tx1d_width, src_diff, diff_stride, src,
                     src_stride, dst, dst_stride);
#endif  // CONFIG_AOM_HIGHBITDEPTH

#if !CONFIG_PVQ
  a = &args->ta[blk_col];
  l = &args->tl[blk_row];
  ctx = combine_entropy_contexts(*a, *l);

  if (args->enable_optimize_b) {
    // FP quantize first, then trellis-optimize the coefficients.
#if CONFIG_NEW_QUANT
    av1_xform_quant_fp_nuq(cm, x, plane, block, blk_row, blk_col, plane_bsize,
                           tx_size, ctx);
#else   // CONFIG_NEW_QUANT
    av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                    AV1_XFORM_QUANT_FP);
#endif  // CONFIG_NEW_QUANT
    if (p->eobs[block]) {
      *a = *l = av1_optimize_b(cm, x, plane, block, tx_size, ctx) > 0;
    } else {
      *a = *l = 0;
    }
  } else {
    av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                    AV1_XFORM_QUANT_B);
    *a = *l = p->eobs[block] > 0;
  }

  if (*eob) {
    // inverse transform
    inv_txfm_param.tx_type = tx_type;
    inv_txfm_param.tx_size = tx_size;
    inv_txfm_param.eob = *eob;
    inv_txfm_param.lossless = xd->lossless[mbmi->segment_id];
#if CONFIG_AOM_HIGHBITDEPTH
    inv_txfm_param.bd = xd->bd;
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      highbd_inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
    } else {
      inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
    }
#else
    inv_txfm_add(dqcoeff, dst, dst_stride, &inv_txfm_param);
#endif  // CONFIG_AOM_HIGHBITDEPTH

    *(args->skip) = 0;
  }
#else   // #if !CONFIG_PVQ
  // transform block size in pixels
  tx_blk_size = tx_size_wide[tx_size];

  // copy uint8 orig and predicted block to int16 buffer
  // in order to use existing VP10 transform functions
  for (j = 0; j < tx_blk_size; j++)
    for (i = 0; i < tx_blk_size; i++) {
      src_int16[diff_stride * j + i] = src[src_stride * j + i];
      pred[diff_stride * j + i] = dst[dst_stride * j + i];
    }

  fwd_txfm_param.rd_transform = 0;
  fwd_txfm_param.tx_type = tx_type;
  fwd_txfm_param.tx_size = tx_size;
  fwd_txfm_param.fwd_txfm_opt = FWD_TXFM_OPT_NORMAL;
  fwd_txfm_param.lossless = xd->lossless[mbmi->segment_id];
  // PVQ codes the source coefficients against the prediction's coefficients,
  // so both are forward transformed.
  fwd_txfm(src_int16, coeff, diff_stride, &fwd_txfm_param);
  fwd_txfm(pred, ref_coeff, diff_stride, &fwd_txfm_param);

  // PVQ for intra mode block
  if (!x->skip_block)
    skip = av1_pvq_encode_helper(&x->daala_enc,
                                 coeff,      // target original vector
                                 ref_coeff,  // reference vector
                                 dqcoeff,    // de-quantized vector
                                 eob,        // End of Block marker
                                 pd->dequant,  // aom's quantizers
                                 plane,        // image plane
                                 tx_size,      // block size in log_2 - 2
                                 tx_type,
                                 &x->rate,  // rate measured
                                 x->pvq_speed,
                                 pvq_info);  // PVQ info for a block

  x->pvq_skip[plane] = skip;

  if (!skip) mbmi->skip = 0;

  // Since av1 does not have separate function which does inverse transform
  // but av1_inv_txfm_add_*x*() also does addition of predicted image to
  // inverse transformed image,
  // pass blank dummy image to av1_inv_txfm_add_*x*(), i.e. set dst as zeros

  if (!skip) {
    for (j = 0; j < tx_blk_size; j++)
      for (i = 0; i < tx_blk_size; i++) dst[j * dst_stride + i] = 0;

    switch (tx_size) {
      case TX_32X32:
        av1_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
        break;
      case TX_16X16:
        av1_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
        break;
      case TX_8X8:
        av1_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
        break;
      case TX_4X4:
        // this is like av1_short_idct4x4 but has a special case around eob<=1
        // which is significant (not just an optimization) for the lossless
        // case.
        av1_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
                             xd->lossless[seg_id]);
        break;
      default: assert(0); break;
    }
  }
#endif  // #if !CONFIG_PVQ

#if !CONFIG_PVQ
  if (*eob) *(args->skip) = 0;
#else
// Note : *(args->skip) == mbmi->skip
#endif
}
1351
Angie Chiangff6d8902016-10-21 11:02:09 -07001352void av1_encode_intra_block_plane(AV1_COMMON *cm, MACROBLOCK *x,
1353 BLOCK_SIZE bsize, int plane,
Yaowu Xuf883b422016-08-30 14:01:10 -07001354 int enable_optimize_b) {
Yaowu Xuc27fc142016-08-22 16:08:15 -07001355 const MACROBLOCKD *const xd = &x->e_mbd;
1356 ENTROPY_CONTEXT ta[2 * MAX_MIB_SIZE];
1357 ENTROPY_CONTEXT tl[2 * MAX_MIB_SIZE];
1358
Angie Chiangff6d8902016-10-21 11:02:09 -07001359 struct encode_b_args arg = {
1360 cm, x, NULL, &xd->mi[0]->mbmi.skip, ta, tl, enable_optimize_b
1361 };
Yaowu Xuc27fc142016-08-22 16:08:15 -07001362 if (enable_optimize_b) {
1363 const struct macroblockd_plane *const pd = &xd->plane[plane];
1364 const TX_SIZE tx_size =
1365 plane ? get_uv_tx_size(&xd->mi[0]->mbmi, pd) : xd->mi[0]->mbmi.tx_size;
Yaowu Xuf883b422016-08-30 14:01:10 -07001366 av1_get_entropy_contexts(bsize, tx_size, pd, ta, tl);
Yaowu Xuc27fc142016-08-22 16:08:15 -07001367 }
Yaowu Xuf883b422016-08-30 14:01:10 -07001368 av1_foreach_transformed_block_in_plane(xd, bsize, plane,
1369 av1_encode_block_intra, &arg);
Yaowu Xuc27fc142016-08-22 16:08:15 -07001370}
Yushin Cho77bba8d2016-11-04 16:36:56 -07001371
1372#if CONFIG_PVQ
// Quantize and entropy-code one transform block with Daala's PVQ
// (Perceptual Vector Quantization), writing symbols into daala_enc's
// range coder and reconstructing the de-quantized coefficients.
//
//   coeff     : target (source) transform coefficients, raster order.
//   ref_coeff : prediction's transform coefficients; PVQ codes the
//               difference relative to this reference.
//   dqcoeff   : out: de-quantized coefficients, raster order.
//   eob       : out: end-of-block marker; always set to the full block
//               area here (PVQ does not track a per-block EOB).
//   quant     : AOM quantizer pair (quant[0] = DC, quant[1] = AC).
//   plane     : image plane index (0 = luma).
//   tx_size   : transform size enum; side length via tx_size_wide[].
//   rate      : out: bits spent, converted to AV1 cost units.
//   speed     : speed/quality trade-off forwarded to od_pvq_encode.
//   pvq_info  : optional per-block PVQ bookkeeping for the final
//               bitstream pass (may be NULL).
//
// Returns nonzero ("skip") when neither the AC bands nor a DC residue
// were coded for this block.
//
// NOTE(review): the sequence of coder calls below (raster reorder,
// od_pvq_encode, generic_encode, od_ec_enc_bits) defines the bitstream
// layout and must stay in this exact order.
int av1_pvq_encode_helper(daala_enc_ctx *daala_enc, tran_low_t *const coeff,
                          tran_low_t *ref_coeff, tran_low_t *const dqcoeff,
                          uint16_t *eob, const int16_t *quant, int plane,
                          int tx_size, TX_TYPE tx_type, int *rate, int speed,
                          PVQ_INFO *pvq_info) {
  const int tx_blk_size = tx_size_wide[tx_size];
  int skip;
  // TODO(yushin): Enable this later, when pvq_qm_q4 is available in AOM.
  // int pvq_dc_quant = OD_MAXI(1,
  // quant * daala_enc->state.pvq_qm_q4[plane][od_qm_get_index(tx_size, 0)] >>
  // 4);
  // 32x32 transforms get an extra right-shift of the quantizer
  // (presumably compensating a transform scale factor — TODO confirm).
  int quant_shift = tx_size == TX_32X32 ? 1 : 0;
  // DC quantizer for PVQ
  int pvq_dc_quant = OD_MAXI(1, quant[0] >> quant_shift);
  int tell;
  // When set, a zero DC residue is signalled implicitly (no symbol coded).
  int has_dc_skip = 1;
  int i;
  // Offset into the quantization-matrix tables for this size/plane.
  int off = od_qm_offset(tx_size, plane ? 1 : 0);
#if PVQ_CHROMA_RD
  double save_pvq_lambda;
#endif
  // Scratch copies in PVQ coding (band) order; int32 variants are the
  // working precision expected by od_pvq_encode.
  DECLARE_ALIGNED(16, int16_t, coeff_pvq[OD_BSIZE_MAX * OD_BSIZE_MAX]);
  DECLARE_ALIGNED(16, int16_t, ref_coeff_pvq[OD_BSIZE_MAX * OD_BSIZE_MAX]);
  DECLARE_ALIGNED(16, int16_t, dqcoeff_pvq[OD_BSIZE_MAX * OD_BSIZE_MAX]);

  DECLARE_ALIGNED(16, int32_t, in_int32[OD_BSIZE_MAX * OD_BSIZE_MAX]);
  DECLARE_ALIGNED(16, int32_t, ref_int32[OD_BSIZE_MAX * OD_BSIZE_MAX]);
  DECLARE_ALIGNED(16, int32_t, out_int32[OD_BSIZE_MAX * OD_BSIZE_MAX]);

  *eob = 0;

  // Coder position (in fractional bits) before this block; the rate
  // estimate below is the difference after coding.
  tell = od_ec_enc_tell_frac(&daala_enc->ec);

  // Change coefficient ordering for pvq encoding.
  od_raster_to_coding_order(coeff_pvq, tx_blk_size, tx_type, coeff,
                            tx_blk_size);
  od_raster_to_coding_order(ref_coeff_pvq, tx_blk_size, tx_type, ref_coeff,
                            tx_blk_size);

  // copy int16 inputs to int32
  for (i = 0; i < tx_blk_size * tx_blk_size; i++) {
    ref_int32[i] = ref_coeff_pvq[i];
    in_int32[i] = coeff_pvq[i];
  }

#if PVQ_CHROMA_RD
  // Temporarily soften the rate-distortion lambda for chroma planes;
  // restored before returning.
  if (plane != 0) {
    save_pvq_lambda = daala_enc->pvq_norm_lambda;
    daala_enc->pvq_norm_lambda *= 0.8;
  }
#endif
  // Quantize the DC residual with a dead zone of ~0.55 * step size
  // (141/256); residues inside the dead zone quantize to zero.
  if (abs(in_int32[0] - ref_int32[0]) < pvq_dc_quant * 141 / 256) { /* 0.55 */
    out_int32[0] = 0;
  } else {
    out_int32[0] = OD_DIV_R0(in_int32[0] - ref_int32[0], pvq_dc_quant);
  }

  // Code the AC bands; returns nonzero when everything quantized to zero.
  skip = od_pvq_encode(
      daala_enc, ref_int32, in_int32, out_int32,
      (int)quant[0] >> quant_shift, // scale/quantizer
      (int)quant[1] >> quant_shift, // scale/quantizer
      // TODO(yushin): Instead of 0,
      // use daala_enc->use_activity_masking for activity masking.
      plane, tx_size, OD_PVQ_BETA[0][plane][tx_size], OD_ROBUST_STREAM,
      0, // is_keyframe,
      0, 0, 0, // q_scaling, bx, by,
      daala_enc->state.qm + off, daala_enc->state.qm_inv + off,
      speed, // speed
      pvq_info);

  // Sanity: skip decision must agree with the recorded coded-band flags.
  if (skip && pvq_info) assert(pvq_info->ac_dc_coded == 0);

  if (!skip && pvq_info) assert(pvq_info->ac_dc_coded > 0);

  // Encode residue of DC coeff, if required.
  if (!has_dc_skip || out_int32[0]) {
    generic_encode(&daala_enc->ec, &daala_enc->state.adapt.model_dc[plane],
                   abs(out_int32[0]) - has_dc_skip, -1,
                   &daala_enc->state.adapt.ex_dc[plane][tx_size][0], 2);
  }
  // Sign bit for a nonzero DC residue; a coded DC means the block is
  // not skipped after all.
  if (out_int32[0]) {
    od_ec_enc_bits(&daala_enc->ec, out_int32[0] < 0, 1);
    skip = 0;
  }

  // need to save quantized residue of DC coeff
  // so that final pvq bitstream writing can know whether DC is coded.
  if (pvq_info) pvq_info->dq_dc_residue = out_int32[0];

  // Reconstruct DC: de-quantize the residue and add back the reference.
  out_int32[0] = out_int32[0] * pvq_dc_quant;
  out_int32[0] += ref_int32[0];

  // copy int32 result back to int16
  for (i = 0; i < tx_blk_size * tx_blk_size; i++) dqcoeff_pvq[i] = out_int32[i];

  // Back to original coefficient order
  od_coding_order_to_raster(dqcoeff, tx_blk_size, tx_type, dqcoeff_pvq,
                            tx_blk_size);

  *eob = tx_blk_size * tx_blk_size;

  // Rate = coder-position delta, rescaled from the entropy coder's
  // fractional-bit resolution (OD_BITRES) to AV1 cost units.
  *rate = (od_ec_enc_tell_frac(&daala_enc->ec) - tell)
          << (AV1_PROB_COST_SHIFT - OD_BITRES);
  assert(*rate >= 0);
#if PVQ_CHROMA_RD
  if (plane != 0) daala_enc->pvq_norm_lambda = save_pvq_lambda;
#endif
  return skip;
}
1482
1483void av1_store_pvq_enc_info(PVQ_INFO *pvq_info, int *qg, int *theta,
1484 int *max_theta, int *k, od_coeff *y, int nb_bands,
1485 const int *off, int *size, int skip_rest,
1486 int skip_dir,
1487 int bs) { // block size in log_2 -2
1488 int i;
1489 const int tx_blk_size = tx_size_wide[bs];
1490
1491 for (i = 0; i < nb_bands; i++) {
1492 pvq_info->qg[i] = qg[i];
1493 pvq_info->theta[i] = theta[i];
1494 pvq_info->max_theta[i] = max_theta[i];
1495 pvq_info->k[i] = k[i];
1496 pvq_info->off[i] = off[i];
1497 pvq_info->size[i] = size[i];
1498 }
1499
1500 memcpy(pvq_info->y, y, tx_blk_size * tx_blk_size * sizeof(od_coeff));
1501
1502 pvq_info->nb_bands = nb_bands;
1503 pvq_info->skip_rest = skip_rest;
1504 pvq_info->skip_dir = skip_dir;
1505 pvq_info->bs = bs;
1506}
1507#endif