Bug Summary

File: /root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c
Warning: line 1485, column 26
Subtraction of a null pointer (from variable 'tok') and a probably non-null pointer (via field 'start') may result in undefined behavior

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -O2 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name encodeframe.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/media/libaom -fcoverage-compilation-dir=/root/firefox-clang/obj-x86_64-pc-linux-gnu/media/libaom -resource-dir /usr/lib/llvm-22/lib/clang/22 -include /root/firefox-clang/obj-x86_64-pc-linux-gnu/mozilla-config.h -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D _GLIBCXX_ASSERTIONS -D DEBUG=1 -D MOZ_HAS_MOZGLUE -I /root/firefox-clang/media/libaom -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/media/libaom -I /root/firefox-clang/media/libaom/config/linux/x64 -I /root/firefox-clang/media/libaom/config -I /root/firefox-clang/third_party/aom -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /root/firefox-clang/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/llvm-22/lib/clang/22/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/15/../../../../x86_64-linux-gnu/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -Wno-error=tautological-type-limit-compare -Wno-range-loop-analysis -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-unknown-warning-option -Wno-character-conversion -Wno-sign-compare -Wno-unused-function -Wno-unreachable-code -Wno-unneeded-internal-declaration -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -fdwarf2-cfi-asm -o /tmp/scan-build-2026-01-17-100050-2808198-1 -x c /root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c
1/*
2 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11
12#include <limits.h>
13#include <float.h>
14#include <math.h>
15#include <stdbool.h>
16#include <stdio.h>
17
18#include "config/aom_config.h"
19#include "config/aom_dsp_rtcd.h"
20#include "config/av1_rtcd.h"
21
22#include "aom_dsp/aom_dsp_common.h"
23#include "aom_dsp/binary_codes_writer.h"
24#include "aom_ports/mem.h"
25#include "aom_ports/aom_timer.h"
26#include "aom_util/aom_pthread.h"
27#if CONFIG_MISMATCH_DEBUG0
28#include "aom_util/debug_util.h"
29#endif // CONFIG_MISMATCH_DEBUG
30
31#include "av1/common/cfl.h"
32#include "av1/common/common.h"
33#include "av1/common/common_data.h"
34#include "av1/common/entropy.h"
35#include "av1/common/entropymode.h"
36#include "av1/common/idct.h"
37#include "av1/common/mv.h"
38#include "av1/common/mvref_common.h"
39#include "av1/common/pred_common.h"
40#include "av1/common/quant_common.h"
41#include "av1/common/reconintra.h"
42#include "av1/common/reconinter.h"
43#include "av1/common/seg_common.h"
44#include "av1/common/tile_common.h"
45#include "av1/common/warped_motion.h"
46
47#include "av1/encoder/allintra_vis.h"
48#include "av1/encoder/aq_complexity.h"
49#include "av1/encoder/aq_cyclicrefresh.h"
50#include "av1/encoder/aq_variance.h"
51#include "av1/encoder/av1_quantize.h"
52#include "av1/encoder/global_motion_facade.h"
53#include "av1/encoder/encodeframe.h"
54#include "av1/encoder/encodeframe_utils.h"
55#include "av1/encoder/encodemb.h"
56#include "av1/encoder/encodemv.h"
57#include "av1/encoder/encodetxb.h"
58#include "av1/encoder/ethread.h"
59#include "av1/encoder/extend.h"
60#include "av1/encoder/intra_mode_search_utils.h"
61#include "av1/encoder/ml.h"
62#include "av1/encoder/motion_search_facade.h"
63#include "av1/encoder/partition_strategy.h"
64#if !CONFIG_REALTIME_ONLY0
65#include "av1/encoder/partition_model_weights.h"
66#endif
67#include "av1/encoder/partition_search.h"
68#include "av1/encoder/rd.h"
69#include "av1/encoder/rdopt.h"
70#include "av1/encoder/reconinter_enc.h"
71#include "av1/encoder/segmentation.h"
72#include "av1/encoder/tokenize.h"
73#include "av1/encoder/tpl_model.h"
74#include "av1/encoder/var_based_part.h"
75
76#if CONFIG_TUNE_VMAF0
77#include "av1/encoder/tune_vmaf.h"
78#endif
79
80/*!\cond */
81// This is used as a reference when computing the source variance for the
82// purposes of activity masking.
83// Eventually this should be replaced by custom no-reference routines,
84// which will be faster.
85static const uint8_t AV1_VAR_OFFS[MAX_SB_SIZE(1 << 7)] = {
86 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
87 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
88 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
89 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
90 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
91 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
92 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
93 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
94 128, 128, 128, 128, 128, 128, 128, 128
95};
96
97#if CONFIG_AV1_HIGHBITDEPTH1
98static const uint16_t AV1_HIGH_VAR_OFFS_8[MAX_SB_SIZE(1 << 7)] = {
99 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
100 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
101 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
102 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
103 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
104 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
105 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
106 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
107 128, 128, 128, 128, 128, 128, 128, 128
108};
109
110static const uint16_t AV1_HIGH_VAR_OFFS_10[MAX_SB_SIZE(1 << 7)] = {
111 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
112 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
113 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
114 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
115 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
116 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
117 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
118 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
119 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
120 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
121 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
122 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
123 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
124 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
125 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4,
126 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4
127};
128
129static const uint16_t AV1_HIGH_VAR_OFFS_12[MAX_SB_SIZE(1 << 7)] = {
130 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
131 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
132 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
133 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
134 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
135 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
136 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
137 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
138 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
139 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
140 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
141 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
142 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
143 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
144 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
145 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
146 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
147 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
148 128 * 16, 128 * 16
149};
150#endif // CONFIG_AV1_HIGHBITDEPTH
151/*!\endcond */
152
153// For the given bit depth, returns a constant array used to assist the
154// calculation of source block variance, which will then be used to decide
155// adaptive quantizers.
156static const uint8_t *get_var_offs(int use_hbd, int bd) {
157#if CONFIG_AV1_HIGHBITDEPTH1
158 if (use_hbd) {
159 assert(bd == 8 || bd == 10 || bd == 12)((void) sizeof ((bd == 8 || bd == 10 || bd == 12) ? 1 : 0), __extension__
({ if (bd == 8 || bd == 10 || bd == 12) ; else __assert_fail
("bd == 8 || bd == 10 || bd == 12", "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 159, __extension__ __PRETTY_FUNCTION__); }))
;
160 const int off_index = (bd - 8) >> 1;
161 static const uint16_t *high_var_offs[3] = { AV1_HIGH_VAR_OFFS_8,
162 AV1_HIGH_VAR_OFFS_10,
163 AV1_HIGH_VAR_OFFS_12 };
164 return CONVERT_TO_BYTEPTR(high_var_offs[off_index])((uint8_t *)(((uintptr_t)(high_var_offs[off_index])) >>
1))
;
165 }
166#else
167 (void)use_hbd;
168 (void)bd;
169 assert(!use_hbd)((void) sizeof ((!use_hbd) ? 1 : 0), __extension__ ({ if (!use_hbd
) ; else __assert_fail ("!use_hbd", "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 169, __extension__ __PRETTY_FUNCTION__); }))
;
170#endif
171 assert(bd == 8)((void) sizeof ((bd == 8) ? 1 : 0), __extension__ ({ if (bd ==
8) ; else __assert_fail ("bd == 8", "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 171, __extension__ __PRETTY_FUNCTION__); }))
;
172 return AV1_VAR_OFFS;
173}
174
175void av1_init_rtc_counters(MACROBLOCK *const x) {
176 av1_init_cyclic_refresh_counters(x);
177 x->cnt_zeromv = 0;
178}
179
180void av1_accumulate_rtc_counters(AV1_COMP *cpi, const MACROBLOCK *const x) {
181 if (cpi->oxcf.q_cfg.aq_mode == CYCLIC_REFRESH_AQ)
182 av1_accumulate_cyclic_refresh_counters(cpi->cyclic_refresh, x);
183 cpi->rc.cnt_zeromv += x->cnt_zeromv;
184 cpi->rc.num_col_blscroll_last_tl0 += x->sb_col_scroll;
185 cpi->rc.num_row_blscroll_last_tl0 += x->sb_row_scroll;
186}
187
188unsigned int av1_get_perpixel_variance(const AV1_COMP *cpi,
189 const MACROBLOCKD *xd,
190 const struct buf_2d *ref,
191 BLOCK_SIZE bsize, int plane,
192 int use_hbd) {
193 const int subsampling_x = xd->plane[plane].subsampling_x;
194 const int subsampling_y = xd->plane[plane].subsampling_y;
195 const BLOCK_SIZE plane_bsize =
196 get_plane_block_size(bsize, subsampling_x, subsampling_y);
197 unsigned int sse;
198 const unsigned int var = cpi->ppi->fn_ptr[plane_bsize].vf(
199 ref->buf, ref->stride, get_var_offs(use_hbd, xd->bd), 0, &sse);
200 return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[plane_bsize])(((var) + (((1 << (num_pels_log2_lookup[plane_bsize])) >>
1))) >> (num_pels_log2_lookup[plane_bsize]))
;
201}
202
203unsigned int av1_get_perpixel_variance_facade(const AV1_COMP *cpi,
204 const MACROBLOCKD *xd,
205 const struct buf_2d *ref,
206 BLOCK_SIZE bsize, int plane) {
207 const int use_hbd = is_cur_buf_hbd(xd);
208 return av1_get_perpixel_variance(cpi, xd, ref, bsize, plane, use_hbd);
209}
210
211void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
212 int mi_row, int mi_col, const int num_planes,
213 BLOCK_SIZE bsize) {
214 // Set current frame pointer.
215 x->e_mbd.cur_buf = src;
216
217 // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet
218 // the static analysis warnings.
219 for (int i = 0; i < AOMMIN(num_planes, MAX_MB_PLANE)(((num_planes) < (3)) ? (num_planes) : (3)); i++) {
220 const int is_uv = i > 0;
221 setup_pred_plane(
222 &x->plane[i].src, bsize, src->buffers[i], src->crop_widths[is_uv],
223 src->crop_heights[is_uv], src->strides[is_uv], mi_row, mi_col, NULL((void*)0),
224 x->e_mbd.plane[i].subsampling_x, x->e_mbd.plane[i].subsampling_y);
225 }
226}
227
228#if !CONFIG_REALTIME_ONLY0
229/*!\brief Assigns different quantization parameters to each superblock
230 * based on statistics relevant to the selected delta-q mode (variance).
231 * This is the non-rd version.
232 *
233 * \param[in] cpi Top level encoder instance structure
234 * \param[in,out] td Thread data structure
235 * \param[in,out] x Superblock level data for this block.
236 * \param[in] tile_info Tile information / identification
237 * \param[in] mi_row Block row (in "MI_SIZE" units) index
238 * \param[in] mi_col Block column (in "MI_SIZE" units) index
239 * \param[out] num_planes Number of image planes (e.g. Y,U,V)
240 *
241 * \remark No return value but updates superblock and thread data
242 * related to the q / q delta to be used.
243 */
244static inline void setup_delta_q_nonrd(AV1_COMP *const cpi, ThreadData *td,
245 MACROBLOCK *const x,
246 const TileInfo *const tile_info,
247 int mi_row, int mi_col, int num_planes) {
248 AV1_COMMON *const cm = &cpi->common;
249 const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
250 assert(delta_q_info->delta_q_present_flag)((void) sizeof ((delta_q_info->delta_q_present_flag) ? 1 :
0), __extension__ ({ if (delta_q_info->delta_q_present_flag
) ; else __assert_fail ("delta_q_info->delta_q_present_flag"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 250, __extension__ __PRETTY_FUNCTION__); }))
;
251
252 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
253 av1_setup_src_planes(x, cpi->source, mi_row, mi_col, num_planes, sb_size);
254
255 const int delta_q_res = delta_q_info->delta_q_res;
256 int current_qindex = cm->quant_params.base_qindex;
257
258 if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_VARIANCE_BOOST) {
259 current_qindex = av1_get_sbq_variance_boost(cpi, x);
260 }
261
262 x->rdmult_cur_qindex = current_qindex;
263 MACROBLOCKD *const xd = &x->e_mbd;
264 current_qindex = av1_adjust_q_from_delta_q_res(
265 delta_q_res, xd->current_base_qindex, current_qindex);
266
267 x->delta_qindex = current_qindex - cm->quant_params.base_qindex;
268 x->rdmult_delta_qindex = x->delta_qindex;
269
270 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
271 xd->mi[0]->current_qindex = current_qindex;
272 av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id, 0);
273
274 // keep track of any non-zero delta-q used
275 td->deltaq_used |= (x->delta_qindex != 0);
276}
277
278/*!\brief Assigns different quantization parameters to each superblock
279 * based on statistics relevant to the selected delta-q mode (TPL weight,
280 * variance, HDR, etc).
281 *
282 * \ingroup tpl_modelling
283 *
284 * \param[in] cpi Top level encoder instance structure
285 * \param[in,out] td Thread data structure
286 * \param[in,out] x Superblock level data for this block.
287 * \param[in] tile_info Tile information / identification
288 * \param[in] mi_row Block row (in "MI_SIZE" units) index
289 * \param[in] mi_col Block column (in "MI_SIZE" units) index
290 * \param[out] num_planes Number of image planes (e.g. Y,U,V)
291 *
292 * \remark No return value but updates superblock and thread data
293 * related to the q / q delta to be used.
294 */
295static inline void setup_delta_q(AV1_COMP *const cpi, ThreadData *td,
296 MACROBLOCK *const x,
297 const TileInfo *const tile_info, int mi_row,
298 int mi_col, int num_planes) {
299 AV1_COMMON *const cm = &cpi->common;
300 const CommonModeInfoParams *const mi_params = &cm->mi_params;
301 const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
302 assert(delta_q_info->delta_q_present_flag)((void) sizeof ((delta_q_info->delta_q_present_flag) ? 1 :
0), __extension__ ({ if (delta_q_info->delta_q_present_flag
) ; else __assert_fail ("delta_q_info->delta_q_present_flag"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 302, __extension__ __PRETTY_FUNCTION__); }))
;
303
304 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
305 av1_setup_src_planes(x, cpi->source, mi_row, mi_col, num_planes, sb_size);
306
307 const int delta_q_res = delta_q_info->delta_q_res;
308 int current_qindex = cm->quant_params.base_qindex;
309 if (cpi->use_ducky_encode && cpi->ducky_encode_info.frame_info.qp_mode ==
310 DUCKY_ENCODE_FRAME_MODE_QINDEX) {
311 const int sb_row = mi_row >> cm->seq_params->mib_size_log2;
312 const int sb_col = mi_col >> cm->seq_params->mib_size_log2;
313 const int sb_cols =
314 CEIL_POWER_OF_TWO(cm->mi_params.mi_cols, cm->seq_params->mib_size_log2)(((cm->mi_params.mi_cols) + (1 << (cm->seq_params
->mib_size_log2)) - 1) >> (cm->seq_params->mib_size_log2
))
;
315 const int sb_index = sb_row * sb_cols + sb_col;
316 current_qindex =
317 cpi->ducky_encode_info.frame_info.superblock_encode_qindex[sb_index];
318 } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL) {
319 if (DELTA_Q_PERCEPTUAL_MODULATION1 == 1) {
320 const int block_wavelet_energy_level =
321 av1_block_wavelet_energy_level(cpi, x, sb_size);
322 x->sb_energy_level = block_wavelet_energy_level;
323 current_qindex = av1_compute_q_from_energy_level_deltaq_mode(
324 cpi, block_wavelet_energy_level);
325 } else {
326 const int block_var_level = av1_log_block_var(cpi, x, sb_size);
327 x->sb_energy_level = block_var_level;
328 current_qindex =
329 av1_compute_q_from_energy_level_deltaq_mode(cpi, block_var_level);
330 }
331 } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_OBJECTIVE &&
332 cpi->oxcf.algo_cfg.enable_tpl_model) {
333 // Setup deltaq based on tpl stats
334 current_qindex =
335 av1_get_q_for_deltaq_objective(cpi, td, NULL((void*)0), sb_size, mi_row, mi_col);
336 } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_PERCEPTUAL_AI) {
337 current_qindex = av1_get_sbq_perceptual_ai(cpi, sb_size, mi_row, mi_col);
338 } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_USER_RATING_BASED) {
339 current_qindex = av1_get_sbq_user_rating_based(cpi, mi_row, mi_col);
340 } else if (cpi->oxcf.q_cfg.enable_hdr_deltaq) {
341 current_qindex = av1_get_q_for_hdr(cpi, x, sb_size, mi_row, mi_col);
342 } else if (cpi->oxcf.q_cfg.deltaq_mode == DELTA_Q_VARIANCE_BOOST) {
343 current_qindex = av1_get_sbq_variance_boost(cpi, x);
344 }
345
346 x->rdmult_cur_qindex = current_qindex;
347 MACROBLOCKD *const xd = &x->e_mbd;
348 const int adjusted_qindex = av1_adjust_q_from_delta_q_res(
349 delta_q_res, xd->current_base_qindex, current_qindex);
350 if (cpi->use_ducky_encode) {
351 assert(adjusted_qindex == current_qindex)((void) sizeof ((adjusted_qindex == current_qindex) ? 1 : 0),
__extension__ ({ if (adjusted_qindex == current_qindex) ; else
__assert_fail ("adjusted_qindex == current_qindex", "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 351, __extension__ __PRETTY_FUNCTION__); }))
;
352 }
353 current_qindex = adjusted_qindex;
354
355 x->delta_qindex = current_qindex - cm->quant_params.base_qindex;
356 x->rdmult_delta_qindex = x->delta_qindex;
357
358 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
359 xd->mi[0]->current_qindex = current_qindex;
360 av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id, 0);
361
362 // keep track of any non-zero delta-q used
363 td->deltaq_used |= (x->delta_qindex != 0);
364
365 if (cpi->oxcf.tool_cfg.enable_deltalf_mode) {
366 const int delta_lf_res = delta_q_info->delta_lf_res;
367 const int lfmask = ~(delta_lf_res - 1);
368 const int delta_lf_from_base =
369 ((x->delta_qindex / 4 + delta_lf_res / 2) & lfmask);
370 const int8_t delta_lf =
371 (int8_t)clamp(delta_lf_from_base, -MAX_LOOP_FILTER63, MAX_LOOP_FILTER63);
372 const int frame_lf_count =
373 av1_num_planes(cm) > 1 ? FRAME_LF_COUNT4 : FRAME_LF_COUNT4 - 2;
374 const int mib_size = cm->seq_params->mib_size;
375
376 // pre-set the delta lf for loop filter. Note that this value is set
377 // before mi is assigned for each block in current superblock
378 for (int j = 0; j < AOMMIN(mib_size, mi_params->mi_rows - mi_row)(((mib_size) < (mi_params->mi_rows - mi_row)) ? (mib_size
) : (mi_params->mi_rows - mi_row))
; j++) {
379 for (int k = 0; k < AOMMIN(mib_size, mi_params->mi_cols - mi_col)(((mib_size) < (mi_params->mi_cols - mi_col)) ? (mib_size
) : (mi_params->mi_cols - mi_col))
; k++) {
380 const int grid_idx = get_mi_grid_idx(mi_params, mi_row + j, mi_col + k);
381 mi_params->mi_alloc[grid_idx].delta_lf_from_base = delta_lf;
382 for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) {
383 mi_params->mi_alloc[grid_idx].delta_lf[lf_id] = delta_lf;
384 }
385 }
386 }
387 }
388}
389
390static void init_ref_frame_space(AV1_COMP *cpi, ThreadData *td, int mi_row,
391 int mi_col) {
392 const AV1_COMMON *cm = &cpi->common;
393 const GF_GROUP *const gf_group = &cpi->ppi->gf_group;
394 const CommonModeInfoParams *const mi_params = &cm->mi_params;
395 MACROBLOCK *x = &td->mb;
396 const int frame_idx = cpi->gf_frame_index;
397 TplParams *const tpl_data = &cpi->ppi->tpl_data;
398 const uint8_t block_mis_log2 = tpl_data->tpl_stats_block_mis_log2;
399
400 av1_zero(x->tpl_keep_ref_frame)memset(&(x->tpl_keep_ref_frame), 0, sizeof(x->tpl_keep_ref_frame
))
;
401
402 if (!av1_tpl_stats_ready(tpl_data, frame_idx)) return;
403 if (!is_frame_tpl_eligible(gf_group, cpi->gf_frame_index)) return;
404 if (cpi->oxcf.q_cfg.aq_mode != NO_AQ) return;
405
406 const int is_overlay =
407 cpi->ppi->gf_group.update_type[frame_idx] == OVERLAY_UPDATE;
408 if (is_overlay) {
409 memset(x->tpl_keep_ref_frame, 1, sizeof(x->tpl_keep_ref_frame));
410 return;
411 }
412
413 TplDepFrame *tpl_frame = &tpl_data->tpl_frame[frame_idx];
414 TplDepStats *tpl_stats = tpl_frame->tpl_stats_ptr;
415 const int tpl_stride = tpl_frame->stride;
416 int64_t inter_cost[INTER_REFS_PER_FRAME] = { 0 };
417 const int step = 1 << block_mis_log2;
418 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
419
420 const int mi_row_end =
421 AOMMIN(mi_size_high[sb_size] + mi_row, mi_params->mi_rows)(((mi_size_high[sb_size] + mi_row) < (mi_params->mi_rows
)) ? (mi_size_high[sb_size] + mi_row) : (mi_params->mi_rows
))
;
422 const int mi_cols_sr = av1_pixels_to_mi(cm->superres_upscaled_width);
423 const int mi_col_sr =
424 coded_to_superres_mi(mi_col, cm->superres_scale_denominator);
425 const int mi_col_end_sr =
426 AOMMIN(coded_to_superres_mi(mi_col + mi_size_wide[sb_size],(((coded_to_superres_mi(mi_col + mi_size_wide[sb_size], cm->
superres_scale_denominator)) < (mi_cols_sr)) ? (coded_to_superres_mi
(mi_col + mi_size_wide[sb_size], cm->superres_scale_denominator
)) : (mi_cols_sr))
427 cm->superres_scale_denominator),(((coded_to_superres_mi(mi_col + mi_size_wide[sb_size], cm->
superres_scale_denominator)) < (mi_cols_sr)) ? (coded_to_superres_mi
(mi_col + mi_size_wide[sb_size], cm->superres_scale_denominator
)) : (mi_cols_sr))
428 mi_cols_sr)(((coded_to_superres_mi(mi_col + mi_size_wide[sb_size], cm->
superres_scale_denominator)) < (mi_cols_sr)) ? (coded_to_superres_mi
(mi_col + mi_size_wide[sb_size], cm->superres_scale_denominator
)) : (mi_cols_sr))
;
429 const int row_step = step;
430 const int col_step_sr =
431 coded_to_superres_mi(step, cm->superres_scale_denominator);
432 for (int row = mi_row; row < mi_row_end; row += row_step) {
433 for (int col = mi_col_sr; col < mi_col_end_sr; col += col_step_sr) {
434 const TplDepStats *this_stats =
435 &tpl_stats[av1_tpl_ptr_pos(row, col, tpl_stride, block_mis_log2)];
436 int64_t tpl_pred_error[INTER_REFS_PER_FRAME] = { 0 };
437 // Find the winner ref frame idx for the current block
438 int64_t best_inter_cost = this_stats->pred_error[0];
439 int best_rf_idx = 0;
440 for (int idx = 1; idx < INTER_REFS_PER_FRAME; ++idx) {
441 if ((this_stats->pred_error[idx] < best_inter_cost) &&
442 (this_stats->pred_error[idx] != 0)) {
443 best_inter_cost = this_stats->pred_error[idx];
444 best_rf_idx = idx;
445 }
446 }
447 // tpl_pred_error is the pred_error reduction of best_ref w.r.t.
448 // LAST_FRAME.
449 tpl_pred_error[best_rf_idx] = this_stats->pred_error[best_rf_idx] -
450 this_stats->pred_error[LAST_FRAME - 1];
451
452 for (int rf_idx = 1; rf_idx < INTER_REFS_PER_FRAME; ++rf_idx)
453 inter_cost[rf_idx] += tpl_pred_error[rf_idx];
454 }
455 }
456
457 int rank_index[INTER_REFS_PER_FRAME - 1];
458 for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
459 rank_index[idx] = idx + 1;
460 for (int i = idx; i > 0; --i) {
461 if (inter_cost[rank_index[i - 1]] > inter_cost[rank_index[i]]) {
462 const int tmp = rank_index[i - 1];
463 rank_index[i - 1] = rank_index[i];
464 rank_index[i] = tmp;
465 }
466 }
467 }
468
469 x->tpl_keep_ref_frame[INTRA_FRAME] = 1;
470 x->tpl_keep_ref_frame[LAST_FRAME] = 1;
471
472 int cutoff_ref = 0;
473 for (int idx = 0; idx < INTER_REFS_PER_FRAME - 1; ++idx) {
474 x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 1;
475 if (idx > 2) {
476 if (!cutoff_ref) {
477 // If the predictive coding gains are smaller than the previous more
478 // relevant frame over certain amount, discard this frame and all the
479 // frames afterwards.
480 if (llabs(inter_cost[rank_index[idx]]) <
481 llabs(inter_cost[rank_index[idx - 1]]) / 8 ||
482 inter_cost[rank_index[idx]] == 0)
483 cutoff_ref = 1;
484 }
485
486 if (cutoff_ref) x->tpl_keep_ref_frame[rank_index[idx] + LAST_FRAME] = 0;
487 }
488 }
489}
490
491static inline void adjust_rdmult_tpl_model(AV1_COMP *cpi, MACROBLOCK *x,
492 int mi_row, int mi_col) {
493 const BLOCK_SIZE sb_size = cpi->common.seq_params->sb_size;
494 const int orig_rdmult = cpi->rd.RDMULT;
495
496 assert(IMPLIES(cpi->ppi->gf_group.size > 0,((void) sizeof (((!(cpi->ppi->gf_group.size > 0) || (
cpi->gf_frame_index < cpi->ppi->gf_group.size))) ?
1 : 0), __extension__ ({ if ((!(cpi->ppi->gf_group.size
> 0) || (cpi->gf_frame_index < cpi->ppi->gf_group
.size))) ; else __assert_fail ("IMPLIES(cpi->ppi->gf_group.size > 0, cpi->gf_frame_index < cpi->ppi->gf_group.size)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 497, __extension__ __PRETTY_FUNCTION__); }))
497 cpi->gf_frame_index < cpi->ppi->gf_group.size))((void) sizeof (((!(cpi->ppi->gf_group.size > 0) || (
cpi->gf_frame_index < cpi->ppi->gf_group.size))) ?
1 : 0), __extension__ ({ if ((!(cpi->ppi->gf_group.size
> 0) || (cpi->gf_frame_index < cpi->ppi->gf_group
.size))) ; else __assert_fail ("IMPLIES(cpi->ppi->gf_group.size > 0, cpi->gf_frame_index < cpi->ppi->gf_group.size)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 497, __extension__ __PRETTY_FUNCTION__); }))
;
498 const int gf_group_index = cpi->gf_frame_index;
499 if (cpi->oxcf.algo_cfg.enable_tpl_model && cpi->oxcf.q_cfg.aq_mode == NO_AQ &&
500 cpi->oxcf.q_cfg.deltaq_mode == NO_DELTA_Q && gf_group_index > 0 &&
501 cpi->ppi->gf_group.update_type[gf_group_index] == ARF_UPDATE) {
502 const int dr =
503 av1_get_rdmult_delta(cpi, sb_size, mi_row, mi_col, orig_rdmult);
504 x->rdmult = dr;
505 }
506}
507#endif // !CONFIG_REALTIME_ONLY
508
509#if CONFIG_RT_ML_PARTITIONING0
510// Get a prediction(stored in x->est_pred) for the whole superblock.
511static void get_estimated_pred(AV1_COMP *cpi, const TileInfo *const tile,
512 MACROBLOCK *x, int mi_row, int mi_col) {
513 AV1_COMMON *const cm = &cpi->common;
514 const int is_key_frame = frame_is_intra_only(cm);
515 MACROBLOCKD *xd = &x->e_mbd;
516
517 // TODO(kyslov) Extend to 128x128
518 assert(cm->seq_params->sb_size == BLOCK_64X64)((void) sizeof ((cm->seq_params->sb_size == BLOCK_64X64
) ? 1 : 0), __extension__ ({ if (cm->seq_params->sb_size
== BLOCK_64X64) ; else __assert_fail ("cm->seq_params->sb_size == BLOCK_64X64"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 518, __extension__ __PRETTY_FUNCTION__); }))
;
519
520 av1_set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
521
522 if (!is_key_frame) {
523 MB_MODE_INFO *mi = xd->mi[0];
524 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_yv12_buf(cm, LAST_FRAME);
525
526 assert(yv12 != NULL)((void) sizeof ((yv12 != ((void*)0)) ? 1 : 0), __extension__ (
{ if (yv12 != ((void*)0)) ; else __assert_fail ("yv12 != NULL"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 526, __extension__ __PRETTY_FUNCTION__); }))
;
527
528 av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
529 get_ref_scale_factors(cm, LAST_FRAME), 1);
530 mi->ref_frame[0] = LAST_FRAME;
531 mi->ref_frame[1] = NONE;
532 mi->bsize = BLOCK_64X64;
533 mi->mv[0].as_int = 0;
534 mi->interp_filters = av1_broadcast_interp_filter(BILINEAR);
535
536 set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]);
537
538 xd->plane[0].dst.buf = x->est_pred;
539 xd->plane[0].dst.stride = 64;
540 av1_enc_build_inter_predictor_y(xd, mi_row, mi_col);
541 } else {
542#if CONFIG_AV1_HIGHBITDEPTH1
543 switch (xd->bd) {
544 case 8: memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0])); break;
545 case 10:
546 memset(x->est_pred, 128 * 4, 64 * 64 * sizeof(x->est_pred[0]));
547 break;
548 case 12:
549 memset(x->est_pred, 128 * 16, 64 * 64 * sizeof(x->est_pred[0]));
550 break;
551 }
552#else
553 memset(x->est_pred, 128, 64 * 64 * sizeof(x->est_pred[0]));
554#endif // CONFIG_VP9_HIGHBITDEPTH
555 }
556}
557#endif // CONFIG_RT_ML_PARTITIONING
558
559#define AVG_CDF_WEIGHT_LEFT3 3
560#define AVG_CDF_WEIGHT_TOP_RIGHT1 1
561
562/*!\brief Encode a superblock (minimal RD search involved)
563 *
564 * \ingroup partition_search
565 * Encodes the superblock by a pre-determined partition pattern, only minor
566 * rd-based searches are allowed to adjust the initial pattern. It is only used
567 * by realtime encoding.
568 */
569static inline void encode_nonrd_sb(AV1_COMP *cpi, ThreadData *td,
570 TileDataEnc *tile_data, TokenExtra **tp,
571 const int mi_row, const int mi_col,
572 const int seg_skip) {
573 AV1_COMMON *const cm = &cpi->common;
574 MACROBLOCK *const x = &td->mb;
575 const SPEED_FEATURES *const sf = &cpi->sf;
576 const TileInfo *const tile_info = &tile_data->tile_info;
577 MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
578 get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
579 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
580 PC_TREE *const pc_root = td->pc_root;
581
582#if !CONFIG_REALTIME_ONLY0
583 if (cm->delta_q_info.delta_q_present_flag) {
584 const int num_planes = av1_num_planes(cm);
585
586 setup_delta_q_nonrd(cpi, td, x, tile_info, mi_row, mi_col, num_planes);
587 }
588#endif
589#if CONFIG_RT_ML_PARTITIONING0
590 if (sf->part_sf.partition_search_type == ML_BASED_PARTITION) {
591 RD_STATS dummy_rdc;
592 get_estimated_pred(cpi, tile_info, x, mi_row, mi_col);
593 av1_nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
594 BLOCK_64X64, &dummy_rdc, 1, INT64_MAX(9223372036854775807L), pc_root);
595 return;
596 }
597#endif
598 // Set the partition
599 if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||
600 (sf->rt_sf.use_fast_fixed_part && x->sb_force_fixed_part == 1 &&
601 (!frame_is_intra_only(cm) &&
602 (!cpi->ppi->use_svc ||
603 !cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)))) {
604 // set a fixed-size partition
605 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
606 BLOCK_SIZE bsize_select = sf->part_sf.fixed_partition_size;
607 if (sf->rt_sf.use_fast_fixed_part &&
608 x->content_state_sb.source_sad_nonrd < kLowSad) {
609 bsize_select = cm->seq_params->sb_size;
610 }
611 if (cpi->sf.rt_sf.skip_encoding_non_reference_slide_change &&
612 cpi->rc.high_source_sad && cpi->ppi->rtc_ref.non_reference_frame) {
613 bsize_select = cm->seq_params->sb_size;
614 x->force_zeromv_skip_for_sb = 1;
615 }
616 const BLOCK_SIZE bsize = seg_skip ? sb_size : bsize_select;
617 if (x->content_state_sb.source_sad_nonrd > kZeroSad)
618 x->force_color_check_block_level = 1;
619 av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
620 } else if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
621 // set a variance-based partition
622 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
623 av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
624 }
625 assert(sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip ||((void) sizeof ((sf->part_sf.partition_search_type == FIXED_PARTITION
|| seg_skip || sf->part_sf.partition_search_type == VAR_BASED_PARTITION
) ? 1 : 0), __extension__ ({ if (sf->part_sf.partition_search_type
== FIXED_PARTITION || seg_skip || sf->part_sf.partition_search_type
== VAR_BASED_PARTITION) ; else __assert_fail ("sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip || sf->part_sf.partition_search_type == VAR_BASED_PARTITION"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 626, __extension__ __PRETTY_FUNCTION__); }))
626 sf->part_sf.partition_search_type == VAR_BASED_PARTITION)((void) sizeof ((sf->part_sf.partition_search_type == FIXED_PARTITION
|| seg_skip || sf->part_sf.partition_search_type == VAR_BASED_PARTITION
) ? 1 : 0), __extension__ ({ if (sf->part_sf.partition_search_type
== FIXED_PARTITION || seg_skip || sf->part_sf.partition_search_type
== VAR_BASED_PARTITION) ; else __assert_fail ("sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip || sf->part_sf.partition_search_type == VAR_BASED_PARTITION"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 626, __extension__ __PRETTY_FUNCTION__); }))
;
627 set_cb_offsets(td->mb.cb_offset, 0, 0);
628
629 // Initialize the flag to skip cdef to 1.
630 if (sf->rt_sf.skip_cdef_sb) {
631 const int block64_in_sb = (sb_size == BLOCK_128X128) ? 2 : 1;
632 // If 128x128 block is used, we need to set the flag for all 4 64x64 sub
633 // "blocks".
634 for (int r = 0; r < block64_in_sb; ++r) {
635 for (int c = 0; c < block64_in_sb; ++c) {
636 const int idx_in_sb =
637 r * MI_SIZE_64X64(64 >> 2) * cm->mi_params.mi_stride + c * MI_SIZE_64X64(64 >> 2);
638 if (mi[idx_in_sb]) mi[idx_in_sb]->cdef_strength = 1;
639 }
640 }
641 }
642
643#if CONFIG_COLLECT_COMPONENT_TIMING0
644 start_timing(cpi, nonrd_use_partition_time);
645#endif
646 av1_nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
647 pc_root);
648#if CONFIG_COLLECT_COMPONENT_TIMING0
649 end_timing(cpi, nonrd_use_partition_time);
650#endif
651}
652
653// This function initializes the stats for encode_rd_sb.
654static inline void init_encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
655 const TileDataEnc *tile_data,
656 SIMPLE_MOTION_DATA_TREE *sms_root,
657 RD_STATS *rd_cost, int mi_row, int mi_col,
658 int gather_tpl_data) {
659 const AV1_COMMON *cm = &cpi->common;
660 const TileInfo *tile_info = &tile_data->tile_info;
661 MACROBLOCK *x = &td->mb;
662
663 const SPEED_FEATURES *sf = &cpi->sf;
664 const int use_simple_motion_search =
665 (sf->part_sf.simple_motion_search_split ||
666 sf->part_sf.simple_motion_search_prune_rect ||
667 sf->part_sf.simple_motion_search_early_term_none ||
668 sf->part_sf.ml_early_term_after_part_split_level) &&
669 !frame_is_intra_only(cm);
670 if (use_simple_motion_search) {
671 av1_init_simple_motion_search_mvs_for_sb(cpi, tile_info, x, sms_root,
672 mi_row, mi_col);
673 }
674
675#if !CONFIG_REALTIME_ONLY0
676 if (!(has_no_stats_stage(cpi) && cpi->oxcf.mode == REALTIME &&
677 cpi->oxcf.gf_cfg.lag_in_frames == 0)) {
678 init_ref_frame_space(cpi, td, mi_row, mi_col);
679 x->sb_energy_level = 0;
680 x->part_search_info.cnn_output_valid = 0;
681 if (gather_tpl_data) {
682 if (cm->delta_q_info.delta_q_present_flag) {
683 const int num_planes = av1_num_planes(cm);
684 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
685 setup_delta_q(cpi, td, x, tile_info, mi_row, mi_col, num_planes);
686 av1_tpl_rdmult_setup_sb(cpi, x, sb_size, mi_row, mi_col);
687 }
688
689 // TODO(jingning): revisit this function.
690 if (cpi->oxcf.algo_cfg.enable_tpl_model && (0)) {
691 adjust_rdmult_tpl_model(cpi, x, mi_row, mi_col);
692 }
693 }
694 }
695#else
696 (void)tile_info;
697 (void)mi_row;
698 (void)mi_col;
699 (void)gather_tpl_data;
700#endif
701
702 x->reuse_inter_pred = false0;
703 x->txfm_search_params.mode_eval_type = DEFAULT_EVAL;
704 reset_mb_rd_record(x->txfm_search_info.mb_rd_record);
705 av1_zero(x->picked_ref_frames_mask)memset(&(x->picked_ref_frames_mask), 0, sizeof(x->picked_ref_frames_mask
))
;
706 av1_invalid_rd_stats(rd_cost);
707}
708
709#if !CONFIG_REALTIME_ONLY0
710static void sb_qp_sweep_init_quantizers(AV1_COMP *cpi, ThreadData *td,
711 const TileDataEnc *tile_data,
712 SIMPLE_MOTION_DATA_TREE *sms_tree,
713 RD_STATS *rd_cost, int mi_row,
714 int mi_col, int delta_qp_ofs) {
715 AV1_COMMON *const cm = &cpi->common;
716 MACROBLOCK *const x = &td->mb;
717 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
718 const TileInfo *tile_info = &tile_data->tile_info;
719 const CommonModeInfoParams *const mi_params = &cm->mi_params;
720 const DeltaQInfo *const delta_q_info = &cm->delta_q_info;
721 assert(delta_q_info->delta_q_present_flag)((void) sizeof ((delta_q_info->delta_q_present_flag) ? 1 :
0), __extension__ ({ if (delta_q_info->delta_q_present_flag
) ; else __assert_fail ("delta_q_info->delta_q_present_flag"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 721, __extension__ __PRETTY_FUNCTION__); }))
;
722 const int delta_q_res = delta_q_info->delta_q_res;
723
724 const SPEED_FEATURES *sf = &cpi->sf;
725 const int use_simple_motion_search =
726 (sf->part_sf.simple_motion_search_split ||
727 sf->part_sf.simple_motion_search_prune_rect ||
728 sf->part_sf.simple_motion_search_early_term_none ||
729 sf->part_sf.ml_early_term_after_part_split_level) &&
730 !frame_is_intra_only(cm);
731 if (use_simple_motion_search) {
732 av1_init_simple_motion_search_mvs_for_sb(cpi, tile_info, x, sms_tree,
733 mi_row, mi_col);
734 }
735
736 int current_qindex = x->rdmult_cur_qindex + delta_qp_ofs;
737
738 MACROBLOCKD *const xd = &x->e_mbd;
739 current_qindex = av1_adjust_q_from_delta_q_res(
740 delta_q_res, xd->current_base_qindex, current_qindex);
741
742 x->delta_qindex = current_qindex - cm->quant_params.base_qindex;
743
744 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
745 xd->mi[0]->current_qindex = current_qindex;
746 av1_init_plane_quantizers(cpi, x, xd->mi[0]->segment_id, 0);
747
748 // keep track of any non-zero delta-q used
749 td->deltaq_used |= (x->delta_qindex != 0);
750
751 if (cpi->oxcf.tool_cfg.enable_deltalf_mode) {
752 const int delta_lf_res = delta_q_info->delta_lf_res;
753 const int lfmask = ~(delta_lf_res - 1);
754 const int delta_lf_from_base =
755 ((x->delta_qindex / 4 + delta_lf_res / 2) & lfmask);
756 const int8_t delta_lf =
757 (int8_t)clamp(delta_lf_from_base, -MAX_LOOP_FILTER63, MAX_LOOP_FILTER63);
758 const int frame_lf_count =
759 av1_num_planes(cm) > 1 ? FRAME_LF_COUNT4 : FRAME_LF_COUNT4 - 2;
760 const int mib_size = cm->seq_params->mib_size;
761
762 // pre-set the delta lf for loop filter. Note that this value is set
763 // before mi is assigned for each block in current superblock
764 for (int j = 0; j < AOMMIN(mib_size, mi_params->mi_rows - mi_row)(((mib_size) < (mi_params->mi_rows - mi_row)) ? (mib_size
) : (mi_params->mi_rows - mi_row))
; j++) {
765 for (int k = 0; k < AOMMIN(mib_size, mi_params->mi_cols - mi_col)(((mib_size) < (mi_params->mi_cols - mi_col)) ? (mib_size
) : (mi_params->mi_cols - mi_col))
; k++) {
766 const int grid_idx = get_mi_grid_idx(mi_params, mi_row + j, mi_col + k);
767 mi_params->mi_alloc[grid_idx].delta_lf_from_base = delta_lf;
768 for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) {
769 mi_params->mi_alloc[grid_idx].delta_lf[lf_id] = delta_lf;
770 }
771 }
772 }
773 }
774
775 x->reuse_inter_pred = false0;
776 x->txfm_search_params.mode_eval_type = DEFAULT_EVAL;
777 reset_mb_rd_record(x->txfm_search_info.mb_rd_record);
778 av1_zero(x->picked_ref_frames_mask)memset(&(x->picked_ref_frames_mask), 0, sizeof(x->picked_ref_frames_mask
))
;
779 av1_invalid_rd_stats(rd_cost);
780}
781
782static int sb_qp_sweep(AV1_COMP *const cpi, ThreadData *td,
783 TileDataEnc *tile_data, TokenExtra **tp, int mi_row,
784 int mi_col, BLOCK_SIZE bsize,
785 SIMPLE_MOTION_DATA_TREE *sms_tree,
786 SB_FIRST_PASS_STATS *sb_org_stats) {
787 AV1_COMMON *const cm = &cpi->common;
788 MACROBLOCK *const x = &td->mb;
789 RD_STATS rdc_winner, cur_rdc;
790 av1_invalid_rd_stats(&rdc_winner);
791
792 int best_qindex = td->mb.rdmult_delta_qindex;
793 const int start = cm->current_frame.frame_type == KEY_FRAME ? -20 : -12;
794 const int end = cm->current_frame.frame_type == KEY_FRAME ? 20 : 12;
795 const int step = cm->delta_q_info.delta_q_res;
796
797 for (int sweep_qp_delta = start; sweep_qp_delta <= end;
798 sweep_qp_delta += step) {
799 sb_qp_sweep_init_quantizers(cpi, td, tile_data, sms_tree, &cur_rdc, mi_row,
800 mi_col, sweep_qp_delta);
801
802 const int alloc_mi_idx = get_alloc_mi_idx(&cm->mi_params, mi_row, mi_col);
803 const int backup_current_qindex =
804 cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex;
805
806 av1_reset_mbmi(&cm->mi_params, bsize, mi_row, mi_col);
807 av1_restore_sb_state(sb_org_stats, cpi, td, tile_data, mi_row, mi_col);
808 cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex = backup_current_qindex;
809
810 td->pc_root = av1_alloc_pc_tree_node(bsize);
811 if (!td->pc_root)
812 aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR,
813 "Failed to allocate PC_TREE");
814 av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
815 &cur_rdc, cur_rdc, td->pc_root, sms_tree, NULL((void*)0),
816 SB_DRY_PASS, NULL((void*)0));
817
818 if ((rdc_winner.rdcost > cur_rdc.rdcost) ||
819 (abs(sweep_qp_delta) < abs(best_qindex - x->rdmult_delta_qindex) &&
820 rdc_winner.rdcost == cur_rdc.rdcost)) {
821 rdc_winner = cur_rdc;
822 best_qindex = x->rdmult_delta_qindex + sweep_qp_delta;
823 }
824 }
825
826 return best_qindex;
827}
828#endif //! CONFIG_REALTIME_ONLY
829
830/*!\brief Encode a superblock (RD-search-based)
831 *
832 * \ingroup partition_search
833 * Conducts partition search for a superblock, based on rate-distortion costs,
834 * from scratch or adjusting from a pre-calculated partition pattern.
835 */
836static inline void encode_rd_sb(AV1_COMP *cpi, ThreadData *td,
837 TileDataEnc *tile_data, TokenExtra **tp,
838 const int mi_row, const int mi_col,
839 const int seg_skip) {
840 AV1_COMMON *const cm = &cpi->common;
841 MACROBLOCK *const x = &td->mb;
842 MACROBLOCKD *const xd = &x->e_mbd;
843 const SPEED_FEATURES *const sf = &cpi->sf;
844 const TileInfo *const tile_info = &tile_data->tile_info;
845 MB_MODE_INFO **mi = cm->mi_params.mi_grid_base +
846 get_mi_grid_idx(&cm->mi_params, mi_row, mi_col);
847 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
848 const int num_planes = av1_num_planes(cm);
849 int dummy_rate;
850 int64_t dummy_dist;
851 RD_STATS dummy_rdc;
852 SIMPLE_MOTION_DATA_TREE *const sms_root = td->sms_root;
853
854#if CONFIG_REALTIME_ONLY0
855 (void)seg_skip;
856#endif // CONFIG_REALTIME_ONLY
857
858 init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row, mi_col,
859 1);
860
861 // Encode the superblock
862 if (sf->part_sf.partition_search_type == VAR_BASED_PARTITION) {
863 // partition search starting from a variance-based partition
864 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
865 av1_choose_var_based_partitioning(cpi, tile_info, td, x, mi_row, mi_col);
866
867#if CONFIG_COLLECT_COMPONENT_TIMING0
868 start_timing(cpi, rd_use_partition_time);
869#endif
870 td->pc_root = av1_alloc_pc_tree_node(sb_size);
871 if (!td->pc_root)
872 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
873 "Failed to allocate PC_TREE");
874 av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
875 &dummy_rate, &dummy_dist, 1, td->pc_root);
876 av1_free_pc_tree_recursive(td->pc_root, num_planes, 0, 0,
877 sf->part_sf.partition_search_type);
878 td->pc_root = NULL((void*)0);
879#if CONFIG_COLLECT_COMPONENT_TIMING0
880 end_timing(cpi, rd_use_partition_time);
881#endif
882 }
883#if !CONFIG_REALTIME_ONLY0
884 else if (sf->part_sf.partition_search_type == FIXED_PARTITION || seg_skip) {
885 // partition search by adjusting a fixed-size partition
886 av1_set_offsets(cpi, tile_info, x, mi_row, mi_col, sb_size);
887 const BLOCK_SIZE bsize =
888 seg_skip ? sb_size : sf->part_sf.fixed_partition_size;
889 av1_set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
890 td->pc_root = av1_alloc_pc_tree_node(sb_size);
891 if (!td->pc_root)
892 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
893 "Failed to allocate PC_TREE");
894 av1_rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, sb_size,
895 &dummy_rate, &dummy_dist, 1, td->pc_root);
896 av1_free_pc_tree_recursive(td->pc_root, num_planes, 0, 0,
897 sf->part_sf.partition_search_type);
898 td->pc_root = NULL((void*)0);
899 } else {
900 // The most exhaustive recursive partition search
901 SuperBlockEnc *sb_enc = &x->sb_enc;
902 // No stats for overlay frames. Exclude key frame.
903 av1_get_tpl_stats_sb(cpi, sb_size, mi_row, mi_col, sb_enc);
904
905 // Reset the tree for simple motion search data
906 av1_reset_simple_motion_tree_partition(sms_root, sb_size);
907
908#if CONFIG_COLLECT_COMPONENT_TIMING0
909 start_timing(cpi, rd_pick_partition_time);
910#endif
911
912 // Estimate the maximum square partition block size, which will be used
913 // as the starting block size for partitioning the sb
914 set_max_min_partition_size(sb_enc, cpi, x, sf, sb_size, mi_row, mi_col);
915
916 // The superblock can be searched only once, or twice consecutively for
917 // better quality. Note that the meaning of passes here is different from
918 // the general concept of 1-pass/2-pass encoders.
919 const int num_passes =
920 cpi->oxcf.unit_test_cfg.sb_multipass_unit_test ? 2 : 1;
921
922 if (cpi->oxcf.sb_qp_sweep &&
923 !(has_no_stats_stage(cpi) && cpi->oxcf.mode == REALTIME &&
924 cpi->oxcf.gf_cfg.lag_in_frames == 0) &&
925 cm->delta_q_info.delta_q_present_flag) {
926 AOM_CHECK_MEM_ERROR(do { td->mb.sb_stats_cache = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_stats_cache))); if (!td->mb.sb_stats_cache
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_stats_cache"); } while (
0)
927 x->e_mbd.error_info, td->mb.sb_stats_cache,do { td->mb.sb_stats_cache = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_stats_cache))); if (!td->mb.sb_stats_cache
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_stats_cache"); } while (
0)
928 (SB_FIRST_PASS_STATS *)aom_malloc(sizeof(*td->mb.sb_stats_cache)))do { td->mb.sb_stats_cache = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_stats_cache))); if (!td->mb.sb_stats_cache
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_stats_cache"); } while (
0)
;
929 av1_backup_sb_state(td->mb.sb_stats_cache, cpi, td, tile_data, mi_row,
930 mi_col);
931 assert(x->rdmult_delta_qindex == x->delta_qindex)((void) sizeof ((x->rdmult_delta_qindex == x->delta_qindex
) ? 1 : 0), __extension__ ({ if (x->rdmult_delta_qindex ==
x->delta_qindex) ; else __assert_fail ("x->rdmult_delta_qindex == x->delta_qindex"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 931, __extension__ __PRETTY_FUNCTION__); }))
;
932
933 const int best_qp_diff =
934 sb_qp_sweep(cpi, td, tile_data, tp, mi_row, mi_col, sb_size, sms_root,
935 td->mb.sb_stats_cache) -
936 x->rdmult_delta_qindex;
937
938 sb_qp_sweep_init_quantizers(cpi, td, tile_data, sms_root, &dummy_rdc,
939 mi_row, mi_col, best_qp_diff);
940
941 const int alloc_mi_idx = get_alloc_mi_idx(&cm->mi_params, mi_row, mi_col);
942 const int backup_current_qindex =
943 cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex;
944
945 av1_reset_mbmi(&cm->mi_params, sb_size, mi_row, mi_col);
946 av1_restore_sb_state(td->mb.sb_stats_cache, cpi, td, tile_data, mi_row,
947 mi_col);
948
949 cm->mi_params.mi_alloc[alloc_mi_idx].current_qindex =
950 backup_current_qindex;
951 aom_free(td->mb.sb_stats_cache);
952 td->mb.sb_stats_cache = NULL((void*)0);
953 }
954 if (num_passes == 1) {
955#if CONFIG_PARTITION_SEARCH_ORDER0
956 if (cpi->ext_part_controller.ready && !frame_is_intra_only(cm)) {
957 av1_reset_part_sf(&cpi->sf.part_sf);
958 av1_reset_sf_for_ext_part(cpi);
959 RD_STATS this_rdc;
960 av1_rd_partition_search(cpi, td, tile_data, tp, sms_root, mi_row,
961 mi_col, sb_size, &this_rdc);
962 } else {
963 td->pc_root = av1_alloc_pc_tree_node(sb_size);
964 if (!td->pc_root)
965 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
966 "Failed to allocate PC_TREE");
967 av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
968 &dummy_rdc, dummy_rdc, td->pc_root, sms_root,
969 NULL((void*)0), SB_SINGLE_PASS, NULL((void*)0));
970 }
971#else
972 td->pc_root = av1_alloc_pc_tree_node(sb_size);
973 if (!td->pc_root)
974 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
975 "Failed to allocate PC_TREE");
976 av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
977 &dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL((void*)0),
978 SB_SINGLE_PASS, NULL((void*)0));
979#endif // CONFIG_PARTITION_SEARCH_ORDER
980 } else {
981 // First pass
982 AOM_CHECK_MEM_ERROR(do { td->mb.sb_fp_stats = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_fp_stats))); if (!td->mb.sb_fp_stats
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_fp_stats"); } while (0)
983 x->e_mbd.error_info, td->mb.sb_fp_stats,do { td->mb.sb_fp_stats = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_fp_stats))); if (!td->mb.sb_fp_stats
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_fp_stats"); } while (0)
984 (SB_FIRST_PASS_STATS *)aom_malloc(sizeof(*td->mb.sb_fp_stats)))do { td->mb.sb_fp_stats = ((SB_FIRST_PASS_STATS *)aom_malloc
(sizeof(*td->mb.sb_fp_stats))); if (!td->mb.sb_fp_stats
) aom_internal_error(x->e_mbd.error_info, AOM_CODEC_MEM_ERROR
, "Failed to allocate " "td->mb.sb_fp_stats"); } while (0)
;
985 av1_backup_sb_state(td->mb.sb_fp_stats, cpi, td, tile_data, mi_row,
986 mi_col);
987 td->pc_root = av1_alloc_pc_tree_node(sb_size);
988 if (!td->pc_root)
989 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
990 "Failed to allocate PC_TREE");
991 av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
992 &dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL((void*)0),
993 SB_DRY_PASS, NULL((void*)0));
994
995 // Second pass
996 init_encode_rd_sb(cpi, td, tile_data, sms_root, &dummy_rdc, mi_row,
997 mi_col, 0);
998 av1_reset_mbmi(&cm->mi_params, sb_size, mi_row, mi_col);
999 av1_reset_simple_motion_tree_partition(sms_root, sb_size);
1000
1001 av1_restore_sb_state(td->mb.sb_fp_stats, cpi, td, tile_data, mi_row,
1002 mi_col);
1003
1004 td->pc_root = av1_alloc_pc_tree_node(sb_size);
1005 if (!td->pc_root)
1006 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
1007 "Failed to allocate PC_TREE");
1008 av1_rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, sb_size,
1009 &dummy_rdc, dummy_rdc, td->pc_root, sms_root, NULL((void*)0),
1010 SB_WET_PASS, NULL((void*)0));
1011 aom_free(td->mb.sb_fp_stats);
1012 td->mb.sb_fp_stats = NULL((void*)0);
1013 }
1014
1015 // Reset to 0 so that it wouldn't be used elsewhere mistakenly.
1016 sb_enc->tpl_data_count = 0;
1017#if CONFIG_COLLECT_COMPONENT_TIMING0
1018 end_timing(cpi, rd_pick_partition_time);
1019#endif
1020 }
1021#endif // !CONFIG_REALTIME_ONLY
1022
1023 // Update the inter rd model
1024 // TODO(angiebird): Let inter_mode_rd_model_estimation support multi-tile.
1025 if (cpi->sf.inter_sf.inter_mode_rd_model_estimation == 1 &&
1026 cm->tiles.cols == 1 && cm->tiles.rows == 1) {
1027 av1_inter_mode_data_fit(tile_data, x->rdmult);
1028 }
1029}
1030
1031// Check if the cost update of symbols mode, coeff and dv are tile or off.
1032static inline int is_mode_coeff_dv_upd_freq_tile_or_off(
1033 const AV1_COMP *const cpi) {
1034 const INTER_MODE_SPEED_FEATURES *const inter_sf = &cpi->sf.inter_sf;
1035
1036 return (inter_sf->coeff_cost_upd_level <= INTERNAL_COST_UPD_TILE &&
1037 inter_sf->mode_cost_upd_level <= INTERNAL_COST_UPD_TILE &&
1038 cpi->sf.intra_sf.dv_cost_upd_level <= INTERNAL_COST_UPD_TILE);
1039}
1040
1041// When row-mt is enabled and cost update frequencies are set to off/tile,
1042// processing of current SB can start even before processing of top-right SB
1043// is finished. This function checks if it is sufficient to wait for top SB
1044// to finish processing before current SB starts processing.
1045static inline int delay_wait_for_top_right_sb(const AV1_COMP *const cpi) {
1046 const MODE mode = cpi->oxcf.mode;
1047 if (mode == GOOD) return 0;
1048
1049 if (mode == ALLINTRA)
1050 return is_mode_coeff_dv_upd_freq_tile_or_off(cpi);
1051 else if (mode == REALTIME)
1052 return (is_mode_coeff_dv_upd_freq_tile_or_off(cpi) &&
1053 cpi->sf.inter_sf.mv_cost_upd_level <= INTERNAL_COST_UPD_TILE);
1054 else
1055 return 0;
1056}
1057
1058/*!\brief Calculate source SAD at superblock level using 64x64 block source SAD
1059 *
1060 * \ingroup partition_search
1061 * \callgraph
1062 * \callergraph
1063 */
1064static inline uint64_t get_sb_source_sad(const AV1_COMP *cpi, int mi_row,
1065 int mi_col) {
1066 if (cpi->src_sad_blk_64x64 == NULL((void*)0)) return UINT64_MAX(18446744073709551615UL);
1067
1068 const AV1_COMMON *const cm = &cpi->common;
1069 const int blk_64x64_in_mis = (cm->seq_params->sb_size == BLOCK_128X128)
1070 ? (cm->seq_params->mib_size >> 1)
1071 : cm->seq_params->mib_size;
1072 const int num_blk_64x64_cols =
1073 (cm->mi_params.mi_cols + blk_64x64_in_mis - 1) / blk_64x64_in_mis;
1074 const int num_blk_64x64_rows =
1075 (cm->mi_params.mi_rows + blk_64x64_in_mis - 1) / blk_64x64_in_mis;
1076 const int blk_64x64_col_index = mi_col / blk_64x64_in_mis;
1077 const int blk_64x64_row_index = mi_row / blk_64x64_in_mis;
1078 uint64_t curr_sb_sad = UINT64_MAX(18446744073709551615UL);
1079 // Avoid the border as sad_blk_64x64 may not be set for the border
1080 // in the scene detection.
1081 if ((blk_64x64_row_index >= num_blk_64x64_rows - 1) ||
1082 (blk_64x64_col_index >= num_blk_64x64_cols - 1)) {
1083 return curr_sb_sad;
1084 }
1085 const uint64_t *const src_sad_blk_64x64_data =
1086 &cpi->src_sad_blk_64x64[blk_64x64_col_index +
1087 blk_64x64_row_index * num_blk_64x64_cols];
1088 if (cm->seq_params->sb_size == BLOCK_128X128) {
1089 // Calculate SB source SAD by accumulating source SAD of 64x64 blocks in the
1090 // superblock
1091 curr_sb_sad = src_sad_blk_64x64_data[0] + src_sad_blk_64x64_data[1] +
1092 src_sad_blk_64x64_data[num_blk_64x64_cols] +
1093 src_sad_blk_64x64_data[num_blk_64x64_cols + 1];
1094 } else if (cm->seq_params->sb_size == BLOCK_64X64) {
1095 curr_sb_sad = src_sad_blk_64x64_data[0];
1096 }
1097 return curr_sb_sad;
1098}
1099
1100/*!\brief Determine whether grading content can be skipped based on sad stat
1101 *
1102 * \ingroup partition_search
1103 * \callgraph
1104 * \callergraph
1105 */
1106static inline bool_Bool is_calc_src_content_needed(AV1_COMP *cpi,
1107 MACROBLOCK *const x, int mi_row,
1108 int mi_col) {
1109 if (cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
1110 return true1;
1111 const uint64_t curr_sb_sad = get_sb_source_sad(cpi, mi_row, mi_col);
1112 if (curr_sb_sad == UINT64_MAX(18446744073709551615UL)) return true1;
1113 if (curr_sb_sad == 0) {
1114 x->content_state_sb.source_sad_nonrd = kZeroSad;
1115 return false0;
1116 }
1117 AV1_COMMON *const cm = &cpi->common;
1118 bool_Bool do_calc_src_content = true1;
1119
1120 if (cpi->oxcf.speed < 9) return do_calc_src_content;
1121
1122 // TODO(yunqing): Tune/validate the thresholds for 128x128 SB size.
1123 if (AOMMIN(cm->width, cm->height)(((cm->width) < (cm->height)) ? (cm->width) : (cm
->height))
< 360) {
1124 // Derive Average 64x64 block source SAD from SB source SAD
1125 const uint64_t avg_64x64_blk_sad =
1126 (cm->seq_params->sb_size == BLOCK_128X128) ? ((curr_sb_sad + 2) >> 2)
1127 : curr_sb_sad;
1128
1129 // The threshold is determined based on kLowSad and kHighSad threshold and
1130 // test results.
1131 uint64_t thresh_low = 15000;
1132 uint64_t thresh_high = 40000;
1133
1134 if (cpi->sf.rt_sf.increase_source_sad_thresh) {
1135 thresh_low = thresh_low << 1;
1136 thresh_high = thresh_high << 1;
1137 }
1138
1139 if (avg_64x64_blk_sad > thresh_low && avg_64x64_blk_sad < thresh_high) {
1140 do_calc_src_content = false0;
1141 // Note: set x->content_state_sb.source_sad_rd as well if this is extended
1142 // to RTC rd path.
1143 x->content_state_sb.source_sad_nonrd = kMedSad;
1144 }
1145 }
1146
1147 return do_calc_src_content;
1148}
1149
1150/*!\brief Determine whether grading content is needed based on sf and frame stat
1151 *
1152 * \ingroup partition_search
1153 * \callgraph
1154 * \callergraph
1155 */
1156// TODO(any): consolidate sfs to make interface cleaner
1157static inline void grade_source_content_sb(AV1_COMP *cpi, MACROBLOCK *const x,
1158 TileDataEnc *tile_data, int mi_row,
1159 int mi_col) {
1160 AV1_COMMON *const cm = &cpi->common;
1161 if (cm->current_frame.frame_type == KEY_FRAME ||
1162 (cpi->ppi->use_svc &&
1163 cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)) {
1164 assert(x->content_state_sb.source_sad_nonrd == kMedSad)((void) sizeof ((x->content_state_sb.source_sad_nonrd == kMedSad
) ? 1 : 0), __extension__ ({ if (x->content_state_sb.source_sad_nonrd
== kMedSad) ; else __assert_fail ("x->content_state_sb.source_sad_nonrd == kMedSad"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1164, __extension__ __PRETTY_FUNCTION__); }))
;
1165 assert(x->content_state_sb.source_sad_rd == kMedSad)((void) sizeof ((x->content_state_sb.source_sad_rd == kMedSad
) ? 1 : 0), __extension__ ({ if (x->content_state_sb.source_sad_rd
== kMedSad) ; else __assert_fail ("x->content_state_sb.source_sad_rd == kMedSad"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1165, __extension__ __PRETTY_FUNCTION__); }))
;
1166 return;
1167 }
1168 bool_Bool calc_src_content = false0;
1169
1170 if (cpi->sf.rt_sf.source_metrics_sb_nonrd) {
1171 if (!cpi->sf.rt_sf.check_scene_detection || cpi->rc.frame_source_sad > 0) {
1172 calc_src_content = is_calc_src_content_needed(cpi, x, mi_row, mi_col);
1173 } else {
1174 x->content_state_sb.source_sad_nonrd = kZeroSad;
1175 }
1176 } else if ((cpi->sf.rt_sf.var_part_based_on_qidx >= 1) &&
1177 (cm->width * cm->height <= 352 * 288)) {
1178 if (cpi->rc.frame_source_sad > 0)
1179 calc_src_content = true1;
1180 else
1181 x->content_state_sb.source_sad_rd = kZeroSad;
1182 }
1183 if (calc_src_content)
1184 av1_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
1185}
1186
1187/*!\brief Encode a superblock row by breaking it into superblocks
1188 *
1189 * \ingroup partition_search
1190 * \callgraph
1191 * \callergraph
1192 * Do partition and mode search for an sb row: one row of superblocks filling up
1193 * the width of the current tile.
1194 */
1195static inline void encode_sb_row(AV1_COMP *cpi, ThreadData *td,
1196 TileDataEnc *tile_data, int mi_row,
1197 TokenExtra **tp) {
1198 AV1_COMMON *const cm = &cpi->common;
1199 const TileInfo *const tile_info = &tile_data->tile_info;
1200 MultiThreadInfo *const mt_info = &cpi->mt_info;
1201 AV1EncRowMultiThreadInfo *const enc_row_mt = &mt_info->enc_row_mt;
1202 AV1EncRowMultiThreadSync *const row_mt_sync = &tile_data->row_mt_sync;
1203 bool_Bool row_mt_enabled = mt_info->row_mt_enabled;
1204 MACROBLOCK *const x = &td->mb;
1205 MACROBLOCKD *const xd = &x->e_mbd;
1206 const int sb_cols_in_tile = av1_get_sb_cols_in_tile(cm, tile_info);
1207 const BLOCK_SIZE sb_size = cm->seq_params->sb_size;
1208 const int mib_size = cm->seq_params->mib_size;
1209 const int mib_size_log2 = cm->seq_params->mib_size_log2;
1210 const int sb_row = (mi_row - tile_info->mi_row_start) >> mib_size_log2;
1211 const int use_nonrd_mode = cpi->sf.rt_sf.use_nonrd_pick_mode;
1212
1213#if CONFIG_COLLECT_COMPONENT_TIMING0
1214 start_timing(cpi, encode_sb_row_time);
1215#endif
1216
1217 // Initialize the left context for the new SB row
1218 av1_zero_left_context(xd);
1219
1220 // Reset delta for quantizer and loof filters at the beginning of every tile
1221 if (mi_row
21.1
'mi_row' is equal to field 'mi_row_start'
== tile_info->mi_row_start || row_mt_enabled) {
1222 if (cm->delta_q_info.delta_q_present_flag)
22
Assuming field 'delta_q_present_flag' is 0
23
Taking false branch
1223 xd->current_base_qindex = cm->quant_params.base_qindex;
1224 if (cm->delta_q_info.delta_lf_present_flag) {
24
Assuming field 'delta_lf_present_flag' is 0
25
Taking false branch
1225 av1_reset_loop_filter_delta(xd, av1_num_planes(cm));
1226 }
1227 }
1228
1229 reset_thresh_freq_fact(x);
1230
1231 // Code each SB in the row
1232 for (int mi_col = tile_info->mi_col_start, sb_col_in_tile = 0;
27
Loop condition is false. Execution continues on line 1511
1233 mi_col < tile_info->mi_col_end; mi_col += mib_size, sb_col_in_tile++) {
26
Assuming 'mi_col' is >= field 'mi_col_end'
1234 // In realtime/allintra mode and when frequency of cost updates is off/tile,
1235 // wait for the top superblock to finish encoding. Otherwise, wait for the
1236 // top-right superblock to finish encoding.
1237 enc_row_mt->sync_read_ptr(
1238 row_mt_sync, sb_row, sb_col_in_tile - delay_wait_for_top_right_sb(cpi));
1239
1240#if CONFIG_MULTITHREAD1
1241 if (row_mt_enabled) {
1242 pthread_mutex_lock(enc_row_mt->mutex_);
1243 const bool_Bool row_mt_exit = enc_row_mt->row_mt_exit;
1244 pthread_mutex_unlock(enc_row_mt->mutex_);
1245 // Exit in case any worker has encountered an error.
1246 if (row_mt_exit) return;
1247 }
1248#endif
1249
1250 const int update_cdf = tile_data->allow_update_cdf && row_mt_enabled;
1251 if (update_cdf && (tile_info->mi_row_start != mi_row)) {
1252 if ((tile_info->mi_col_start == mi_col)) {
1253 // restore frame context at the 1st column sb
1254 *xd->tile_ctx = *x->row_ctx;
1255 } else {
1256 // update context
1257 int wt_left = AVG_CDF_WEIGHT_LEFT3;
1258 int wt_tr = AVG_CDF_WEIGHT_TOP_RIGHT1;
1259 if (tile_info->mi_col_end > (mi_col + mib_size))
1260 av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile,
1261 wt_left, wt_tr);
1262 else
1263 av1_avg_cdf_symbols(xd->tile_ctx, x->row_ctx + sb_col_in_tile - 1,
1264 wt_left, wt_tr);
1265 }
1266 }
1267
1268 // Update the rate cost tables for some symbols
1269 av1_set_cost_upd_freq(cpi, td, tile_info, mi_row, mi_col);
1270
1271 // Reset color coding related parameters
1272 av1_zero(x->color_sensitivity_sb)memset(&(x->color_sensitivity_sb), 0, sizeof(x->color_sensitivity_sb
))
;
1273 av1_zero(x->color_sensitivity_sb_g)memset(&(x->color_sensitivity_sb_g), 0, sizeof(x->color_sensitivity_sb_g
))
;
1274 av1_zero(x->color_sensitivity_sb_alt)memset(&(x->color_sensitivity_sb_alt), 0, sizeof(x->
color_sensitivity_sb_alt))
;
1275 av1_zero(x->color_sensitivity)memset(&(x->color_sensitivity), 0, sizeof(x->color_sensitivity
))
;
1276 x->content_state_sb.source_sad_nonrd = kMedSad;
1277 x->content_state_sb.source_sad_rd = kMedSad;
1278 x->content_state_sb.lighting_change = 0;
1279 x->content_state_sb.low_sumdiff = 0;
1280 x->force_zeromv_skip_for_sb = 0;
1281 x->sb_me_block = 0;
1282 x->sb_me_partition = 0;
1283 x->sb_me_mv.as_int = 0;
1284 x->sb_col_scroll = 0;
1285 x->sb_row_scroll = 0;
1286 x->sb_force_fixed_part = 1;
1287 x->color_palette_thresh = 64;
1288 x->force_color_check_block_level = 0;
1289 x->nonrd_prune_ref_frame_search =
1290 cpi->sf.rt_sf.nonrd_prune_ref_frame_search;
1291
1292 if (cpi->oxcf.mode == ALLINTRA) {
1293 x->intra_sb_rdmult_modifier = 128;
1294 }
1295
1296 xd->cur_frame_force_integer_mv = cm->features.cur_frame_force_integer_mv;
1297 x->source_variance = UINT_MAX(2147483647 *2U +1U);
1298 td->mb.cb_coef_buff = av1_get_cb_coeff_buffer(cpi, mi_row, mi_col);
1299
1300 // Get segment id and skip flag
1301 const struct segmentation *const seg = &cm->seg;
1302 int seg_skip = 0;
1303 if (seg->enabled) {
1304 const uint8_t *const map =
1305 seg->update_map ? cpi->enc_seg.map : cm->last_frame_seg_map;
1306 const uint8_t segment_id =
1307 map ? get_segment_id(&cm->mi_params, map, sb_size, mi_row, mi_col)
1308 : 0;
1309 seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP);
1310 }
1311
1312 produce_gradients_for_sb(cpi, x, sb_size, mi_row, mi_col);
1313
1314 init_src_var_info_of_4x4_sub_blocks(cpi, x->src_var_info_of_4x4_sub_blocks,
1315 sb_size);
1316
1317 // Grade the temporal variation of the sb, the grade will be used to decide
1318 // fast mode search strategy for coding blocks
1319 if (!seg_skip) grade_source_content_sb(cpi, x, tile_data, mi_row, mi_col);
1320
1321 // encode the superblock
1322 if (use_nonrd_mode) {
1323 encode_nonrd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
1324 } else {
1325 encode_rd_sb(cpi, td, tile_data, tp, mi_row, mi_col, seg_skip);
1326 }
1327
1328 // Update the top-right context in row_mt coding
1329 if (update_cdf && (tile_info->mi_row_end > (mi_row + mib_size))) {
1330 if (sb_cols_in_tile == 1)
1331 x->row_ctx[0] = *xd->tile_ctx;
1332 else if (sb_col_in_tile >= 1)
1333 x->row_ctx[sb_col_in_tile - 1] = *xd->tile_ctx;
1334 }
1335 enc_row_mt->sync_write_ptr(row_mt_sync, sb_row, sb_col_in_tile,
1336 sb_cols_in_tile);
1337 }
1338
1339#if CONFIG_COLLECT_COMPONENT_TIMING0
1340 end_timing(cpi, encode_sb_row_time);
1341#endif
1342}
28
Returning without writing to '*tp'
1343
1344static inline void init_encode_frame_mb_context(AV1_COMP *cpi) {
1345 AV1_COMMON *const cm = &cpi->common;
1346 const int num_planes = av1_num_planes(cm);
1347 MACROBLOCK *const x = &cpi->td.mb;
1348 MACROBLOCKD *const xd = &x->e_mbd;
1349
1350 // Copy data over into macro block data structures.
1351 av1_setup_src_planes(x, cpi->source, 0, 0, num_planes,
1352 cm->seq_params->sb_size);
1353
1354 av1_setup_block_planes(xd, cm->seq_params->subsampling_x,
1355 cm->seq_params->subsampling_y, num_planes);
1356}
1357
1358void av1_alloc_tile_data(AV1_COMP *cpi) {
1359 AV1_COMMON *const cm = &cpi->common;
1360 AV1EncRowMultiThreadInfo *const enc_row_mt = &cpi->mt_info.enc_row_mt;
1361 const int tile_cols = cm->tiles.cols;
1362 const int tile_rows = cm->tiles.rows;
1363
1364 av1_row_mt_mem_dealloc(cpi);
1365
1366 aom_free(cpi->tile_data);
1367 cpi->allocated_tiles = 0;
1368 enc_row_mt->allocated_tile_cols = 0;
1369 enc_row_mt->allocated_tile_rows = 0;
1370
1371 CHECK_MEM_ERROR(do { cpi->tile_data = (aom_memalign(32, tile_cols * tile_rows
* sizeof(*cpi->tile_data))); if (!cpi->tile_data) aom_internal_error
((cm)->error, AOM_CODEC_MEM_ERROR, "Failed to allocate " "cpi->tile_data"
); } while (0)
1372 cm, cpi->tile_data,do { cpi->tile_data = (aom_memalign(32, tile_cols * tile_rows
* sizeof(*cpi->tile_data))); if (!cpi->tile_data) aom_internal_error
((cm)->error, AOM_CODEC_MEM_ERROR, "Failed to allocate " "cpi->tile_data"
); } while (0)
1373 aom_memalign(32, tile_cols * tile_rows * sizeof(*cpi->tile_data)))do { cpi->tile_data = (aom_memalign(32, tile_cols * tile_rows
* sizeof(*cpi->tile_data))); if (!cpi->tile_data) aom_internal_error
((cm)->error, AOM_CODEC_MEM_ERROR, "Failed to allocate " "cpi->tile_data"
); } while (0)
;
1374
1375 cpi->allocated_tiles = tile_cols * tile_rows;
1376 enc_row_mt->allocated_tile_cols = tile_cols;
1377 enc_row_mt->allocated_tile_rows = tile_rows;
1378 for (int tile_row = 0; tile_row < tile_rows; ++tile_row) {
1379 for (int tile_col = 0; tile_col < tile_cols; ++tile_col) {
1380 const int tile_index = tile_row * tile_cols + tile_col;
1381 TileDataEnc *const this_tile = &cpi->tile_data[tile_index];
1382 av1_zero(this_tile->row_mt_sync)memset(&(this_tile->row_mt_sync), 0, sizeof(this_tile->
row_mt_sync))
;
1383 this_tile->row_ctx = NULL((void*)0);
1384 }
1385 }
1386}
1387
1388void av1_init_tile_data(AV1_COMP *cpi) {
1389 AV1_COMMON *const cm = &cpi->common;
1390 const int num_planes = av1_num_planes(cm);
1391 const int tile_cols = cm->tiles.cols;
1392 const int tile_rows = cm->tiles.rows;
1393 int tile_col, tile_row;
1394 TokenInfo *const token_info = &cpi->token_info;
1395 TokenExtra *pre_tok = token_info->tile_tok[0][0];
1396 TokenList *tplist = token_info->tplist[0][0];
1397 unsigned int tile_tok = 0;
1398 int tplist_count = 0;
1399
1400 if (!is_stat_generation_stage(cpi) &&
1401 cm->features.allow_screen_content_tools) {
1402 // Number of tokens for which token info needs to be allocated.
1403 unsigned int tokens_required =
1404 get_token_alloc(cm->mi_params.mb_rows, cm->mi_params.mb_cols,
1405 MAX_SB_SIZE_LOG27, num_planes);
1406 // Allocate/reallocate memory for token related info if the number of tokens
1407 // required is more than the number of tokens already allocated. This could
1408 // occur in case of the following:
1409 // 1) If the memory is not yet allocated
1410 // 2) If the frame dimensions have changed
1411 const bool_Bool realloc_tokens = tokens_required > token_info->tokens_allocated;
1412 if (realloc_tokens) {
1413 free_token_info(token_info);
1414 alloc_token_info(cm, token_info, tokens_required);
1415 pre_tok = token_info->tile_tok[0][0];
1416 tplist = token_info->tplist[0][0];
1417 }
1418 }
1419
1420 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
1421 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
1422 TileDataEnc *const tile_data =
1423 &cpi->tile_data[tile_row * tile_cols + tile_col];
1424 TileInfo *const tile_info = &tile_data->tile_info;
1425 av1_tile_init(tile_info, cm, tile_row, tile_col);
1426 tile_data->firstpass_top_mv = kZeroMv;
1427 tile_data->abs_sum_level = 0;
1428
1429 if (is_token_info_allocated(token_info)) {
1430 token_info->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
1431 pre_tok = token_info->tile_tok[tile_row][tile_col];
1432 tile_tok = allocated_tokens(
1433 tile_info, cm->seq_params->mib_size_log2 + MI_SIZE_LOG22,
1434 num_planes);
1435 token_info->tplist[tile_row][tile_col] = tplist + tplist_count;
1436 tplist = token_info->tplist[tile_row][tile_col];
1437 tplist_count = av1_get_sb_rows_in_tile(cm, tile_info);
1438 }
1439 tile_data->allow_update_cdf = !cm->tiles.large_scale;
1440 tile_data->allow_update_cdf = tile_data->allow_update_cdf &&
1441 !cm->features.disable_cdf_update &&
1442 !delay_wait_for_top_right_sb(cpi);
1443 tile_data->tctx = *cm->fc;
1444 }
1445 }
1446}
1447
1448// Populate the start palette token info prior to encoding an SB row.
1449static inline void get_token_start(AV1_COMP *cpi, const TileInfo *tile_info,
1450 int tile_row, int tile_col, int mi_row,
1451 TokenExtra **tp) {
1452 const TokenInfo *token_info = &cpi->token_info;
1453 if (!is_token_info_allocated(token_info)) return;
18
Taking true branch
19
Returning without writing to '*tp'
1454
1455 const AV1_COMMON *cm = &cpi->common;
1456 const int num_planes = av1_num_planes(cm);
1457 TokenList *const tplist = cpi->token_info.tplist[tile_row][tile_col];
1458 const int sb_row_in_tile =
1459 (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
1460
1461 get_start_tok(cpi, tile_row, tile_col, mi_row, tp,
1462 cm->seq_params->mib_size_log2 + MI_SIZE_LOG22, num_planes);
1463 assert(tplist != NULL)((void) sizeof ((tplist != ((void*)0)) ? 1 : 0), __extension__
({ if (tplist != ((void*)0)) ; else __assert_fail ("tplist != NULL"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1463, __extension__ __PRETTY_FUNCTION__); }))
;
1464 tplist[sb_row_in_tile].start = *tp;
1465}
1466
1467// Populate the token count after encoding an SB row.
1468static inline void populate_token_count(AV1_COMP *cpi,
1469 const TileInfo *tile_info, int tile_row,
1470 int tile_col, int mi_row,
1471 TokenExtra *tok) {
1472 const TokenInfo *token_info = &cpi->token_info;
1473 if (!is_token_info_allocated(token_info)) return;
32
Taking false branch
1474
1475 const AV1_COMMON *cm = &cpi->common;
1476 const int num_planes = av1_num_planes(cm);
1477 TokenList *const tplist = token_info->tplist[tile_row][tile_col];
1478 const int sb_row_in_tile =
1479 (mi_row - tile_info->mi_row_start) >> cm->seq_params->mib_size_log2;
33
Assuming right operand of bit shift is non-negative but less than 32
1480 const int tile_mb_cols =
1481 (tile_info->mi_col_end - tile_info->mi_col_start + 2) >> 2;
1482 const int num_mb_rows_in_sb =
1483 ((1 << (cm->seq_params->mib_size_log2 + MI_SIZE_LOG22)) + 8) >> 4;
34
Assuming right operand of bit shift is less than 32
1484 tplist[sb_row_in_tile].count =
1485 (unsigned int)(tok - tplist[sb_row_in_tile].start);
35
Subtraction of a null pointer (from variable 'tok') and a probably non-null pointer (via field 'start') may result in undefined behavior
1486
1487 assert((unsigned int)(tok - tplist[sb_row_in_tile].start) <=((void) sizeof (((unsigned int)(tok - tplist[sb_row_in_tile].
start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
cm->seq_params->mib_size_log2 + 2, num_planes)) ? 1 : 0
), __extension__ ({ if ((unsigned int)(tok - tplist[sb_row_in_tile
].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols
, cm->seq_params->mib_size_log2 + 2, num_planes)) ; else
__assert_fail ("(unsigned int)(tok - tplist[sb_row_in_tile].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2, num_planes)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1490, __extension__ __PRETTY_FUNCTION__); }))
1488 get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,((void) sizeof (((unsigned int)(tok - tplist[sb_row_in_tile].
start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
cm->seq_params->mib_size_log2 + 2, num_planes)) ? 1 : 0
), __extension__ ({ if ((unsigned int)(tok - tplist[sb_row_in_tile
].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols
, cm->seq_params->mib_size_log2 + 2, num_planes)) ; else
__assert_fail ("(unsigned int)(tok - tplist[sb_row_in_tile].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2, num_planes)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1490, __extension__ __PRETTY_FUNCTION__); }))
1489 cm->seq_params->mib_size_log2 + MI_SIZE_LOG2,((void) sizeof (((unsigned int)(tok - tplist[sb_row_in_tile].
start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
cm->seq_params->mib_size_log2 + 2, num_planes)) ? 1 : 0
), __extension__ ({ if ((unsigned int)(tok - tplist[sb_row_in_tile
].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols
, cm->seq_params->mib_size_log2 + 2, num_planes)) ; else
__assert_fail ("(unsigned int)(tok - tplist[sb_row_in_tile].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2, num_planes)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1490, __extension__ __PRETTY_FUNCTION__); }))
1490 num_planes))((void) sizeof (((unsigned int)(tok - tplist[sb_row_in_tile].
start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols,
cm->seq_params->mib_size_log2 + 2, num_planes)) ? 1 : 0
), __extension__ ({ if ((unsigned int)(tok - tplist[sb_row_in_tile
].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols
, cm->seq_params->mib_size_log2 + 2, num_planes)) ; else
__assert_fail ("(unsigned int)(tok - tplist[sb_row_in_tile].start) <= get_token_alloc(num_mb_rows_in_sb, tile_mb_cols, cm->seq_params->mib_size_log2 + MI_SIZE_LOG2, num_planes)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1490, __extension__ __PRETTY_FUNCTION__); }))
;
1491
1492 (void)num_planes;
1493 (void)tile_mb_cols;
1494 (void)num_mb_rows_in_sb;
1495}
1496
1497/*!\brief Encode a superblock row
1498 *
1499 * \ingroup partition_search
1500 */
1501void av1_encode_sb_row(AV1_COMP *cpi, ThreadData *td, int tile_row,
1502 int tile_col, int mi_row) {
1503 AV1_COMMON *const cm = &cpi->common;
1504 const int tile_cols = cm->tiles.cols;
1505 TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
1506 const TileInfo *const tile_info = &this_tile->tile_info;
1507 TokenExtra *tok = NULL((void*)0);
16
'tok' initialized to a null pointer value
1508
1509 get_token_start(cpi, tile_info, tile_row, tile_col, mi_row, &tok);
17
Calling 'get_token_start'
20
Returning from 'get_token_start'
1510
1511 encode_sb_row(cpi, td, this_tile, mi_row, &tok);
21
Calling 'encode_sb_row'
29
Returning from 'encode_sb_row'
1512
1513 populate_token_count(cpi, tile_info, tile_row, tile_col, mi_row, tok);
30
Passing null pointer value via 6th parameter 'tok'
31
Calling 'populate_token_count'
1514}
1515
1516/*!\brief Encode a tile
1517 *
1518 * \ingroup partition_search
1519 */
1520void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
1521 int tile_col) {
1522 AV1_COMMON *const cm = &cpi->common;
1523 TileDataEnc *const this_tile =
1524 &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
1525 const TileInfo *const tile_info = &this_tile->tile_info;
1526
1527 if (!cpi->sf.rt_sf.use_nonrd_pick_mode) av1_inter_mode_data_init(this_tile);
7
Assuming field 'use_nonrd_pick_mode' is not equal to 0
8
Taking false branch
1528
1529 av1_zero_above_context(cm, &td->mb.e_mbd, tile_info->mi_col_start,
1530 tile_info->mi_col_end, tile_row);
1531 av1_init_above_context(&cm->above_contexts, av1_num_planes(cm), tile_row,
1532 &td->mb.e_mbd);
1533
1534#if !CONFIG_REALTIME_ONLY0
1535 if (cpi->oxcf.intra_mode_cfg.enable_cfl_intra)
9
Assuming field 'enable_cfl_intra' is false
10
Taking false branch
1536 cfl_init(&td->mb.e_mbd.cfl, cm->seq_params);
1537#endif
1538
1539 if (td->mb.txfm_search_info.mb_rd_record != NULL((void*)0)) {
11
Assuming field 'mb_rd_record' is equal to NULL
12
Taking false branch
1540 av1_crc32c_calculator_init(
1541 &td->mb.txfm_search_info.mb_rd_record->crc_calculator);
1542 }
1543
1544 for (int mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
13
Assuming 'mi_row' is < field 'mi_row_end'
14
Loop condition is true. Entering loop body
1545 mi_row += cm->seq_params->mib_size) {
1546 av1_encode_sb_row(cpi, td, tile_row, tile_col, mi_row);
15
Calling 'av1_encode_sb_row'
1547 }
1548 this_tile->abs_sum_level = td->abs_sum_level;
1549}
1550
1551/*!\brief Break one frame into tiles and encode the tiles
1552 *
1553 * \ingroup partition_search
1554 *
1555 * \param[in] cpi Top-level encoder structure
1556 */
1557static inline void encode_tiles(AV1_COMP *cpi) {
1558 AV1_COMMON *const cm = &cpi->common;
1559 const int tile_cols = cm->tiles.cols;
1560 const int tile_rows = cm->tiles.rows;
1561 int tile_col, tile_row;
1562
1563 MACROBLOCK *const mb = &cpi->td.mb;
1564 assert(IMPLIES(cpi->tile_data == NULL,((void) sizeof (((!(cpi->tile_data == ((void*)0)) || (cpi->
allocated_tiles < tile_cols * tile_rows))) ? 1 : 0), __extension__
({ if ((!(cpi->tile_data == ((void*)0)) || (cpi->allocated_tiles
< tile_cols * tile_rows))) ; else __assert_fail ("IMPLIES(cpi->tile_data == NULL, cpi->allocated_tiles < tile_cols * tile_rows)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1565, __extension__ __PRETTY_FUNCTION__); }))
1
Assuming field 'tile_data' is not equal to null
1565 cpi->allocated_tiles < tile_cols * tile_rows))((void) sizeof (((!(cpi->tile_data == ((void*)0)) || (cpi->
allocated_tiles < tile_cols * tile_rows))) ? 1 : 0), __extension__
({ if ((!(cpi->tile_data == ((void*)0)) || (cpi->allocated_tiles
< tile_cols * tile_rows))) ; else __assert_fail ("IMPLIES(cpi->tile_data == NULL, cpi->allocated_tiles < tile_cols * tile_rows)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1565, __extension__ __PRETTY_FUNCTION__); }))
;
1566 if (cpi->allocated_tiles < tile_cols * tile_rows) av1_alloc_tile_data(cpi);
2
Assuming the condition is false
3
Taking false branch
1567
1568 av1_init_tile_data(cpi);
1569 av1_alloc_mb_data(cpi, mb);
1570
1571 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
4
Loop condition is true. Entering loop body
1572 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
5
Loop condition is true. Entering loop body
1573 TileDataEnc *const this_tile =
1574 &cpi->tile_data[tile_row * cm->tiles.cols + tile_col];
1575 cpi->td.intrabc_used = 0;
1576 cpi->td.deltaq_used = 0;
1577 cpi->td.abs_sum_level = 0;
1578 cpi->td.rd_counts.seg_tmp_pred_cost[0] = 0;
1579 cpi->td.rd_counts.seg_tmp_pred_cost[1] = 0;
1580 cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx;
1581 cpi->td.mb.tile_pb_ctx = &this_tile->tctx;
1582 av1_init_rtc_counters(&cpi->td.mb);
1583 cpi->td.mb.palette_pixels = 0;
1584 av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
6
Calling 'av1_encode_tile'
1585 if (!frame_is_intra_only(&cpi->common))
1586 av1_accumulate_rtc_counters(cpi, &cpi->td.mb);
1587 cpi->palette_pixel_num += cpi->td.mb.palette_pixels;
1588 cpi->intrabc_used |= cpi->td.intrabc_used;
1589 cpi->deltaq_used |= cpi->td.deltaq_used;
1590 }
1591 }
1592
1593 av1_dealloc_mb_data(mb, av1_num_planes(cm));
1594}
1595
1596// Set the relative distance of a reference frame w.r.t. current frame
1597static inline void set_rel_frame_dist(
1598 const AV1_COMMON *const cm, RefFrameDistanceInfo *const ref_frame_dist_info,
1599 const int ref_frame_flags) {
1600 MV_REFERENCE_FRAME ref_frame;
1601 int min_past_dist = INT32_MAX(2147483647), min_future_dist = INT32_MAX(2147483647);
1602 ref_frame_dist_info->nearest_past_ref = NONE_FRAME;
1603 ref_frame_dist_info->nearest_future_ref = NONE_FRAME;
1604 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
1605 ref_frame_dist_info->ref_relative_dist[ref_frame - LAST_FRAME] = 0;
1606 if (ref_frame_flags & av1_ref_frame_flag_list[ref_frame]) {
1607 int dist = av1_encoder_get_relative_dist(
1608 cm->cur_frame->ref_display_order_hint[ref_frame - LAST_FRAME],
1609 cm->current_frame.display_order_hint);
1610 ref_frame_dist_info->ref_relative_dist[ref_frame - LAST_FRAME] = dist;
1611 // Get the nearest ref_frame in the past
1612 if (abs(dist) < min_past_dist && dist < 0) {
1613 ref_frame_dist_info->nearest_past_ref = ref_frame;
1614 min_past_dist = abs(dist);
1615 }
1616 // Get the nearest ref_frame in the future
1617 if (dist < min_future_dist && dist > 0) {
1618 ref_frame_dist_info->nearest_future_ref = ref_frame;
1619 min_future_dist = dist;
1620 }
1621 }
1622 }
1623}
1624
1625static inline int refs_are_one_sided(const AV1_COMMON *cm) {
1626 assert(!frame_is_intra_only(cm))((void) sizeof ((!frame_is_intra_only(cm)) ? 1 : 0), __extension__
({ if (!frame_is_intra_only(cm)) ; else __assert_fail ("!frame_is_intra_only(cm)"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1626, __extension__ __PRETTY_FUNCTION__); }))
;
1627
1628 int one_sided_refs = 1;
1629 const int cur_display_order_hint = cm->current_frame.display_order_hint;
1630 for (int ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref) {
1631 const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref);
1632 if (buf == NULL((void*)0)) continue;
1633 if (av1_encoder_get_relative_dist(buf->display_order_hint,
1634 cur_display_order_hint) > 0) {
1635 one_sided_refs = 0; // bwd reference
1636 break;
1637 }
1638 }
1639 return one_sided_refs;
1640}
1641
1642static inline void get_skip_mode_ref_offsets(const AV1_COMMON *cm,
1643 int ref_order_hint[2]) {
1644 const SkipModeInfo *const skip_mode_info = &cm->current_frame.skip_mode_info;
1645 ref_order_hint[0] = ref_order_hint[1] = 0;
1646 if (!skip_mode_info->skip_mode_allowed) return;
1647
1648 const RefCntBuffer *const buf_0 =
1649 get_ref_frame_buf(cm, LAST_FRAME + skip_mode_info->ref_frame_idx_0);
1650 const RefCntBuffer *const buf_1 =
1651 get_ref_frame_buf(cm, LAST_FRAME + skip_mode_info->ref_frame_idx_1);
1652 assert(buf_0 != NULL && buf_1 != NULL)((void) sizeof ((buf_0 != ((void*)0) && buf_1 != ((void
*)0)) ? 1 : 0), __extension__ ({ if (buf_0 != ((void*)0) &&
buf_1 != ((void*)0)) ; else __assert_fail ("buf_0 != NULL && buf_1 != NULL"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 1652, __extension__ __PRETTY_FUNCTION__); }))
;
1653
1654 ref_order_hint[0] = buf_0->order_hint;
1655 ref_order_hint[1] = buf_1->order_hint;
1656}
1657
1658static int check_skip_mode_enabled(AV1_COMP *const cpi) {
1659 AV1_COMMON *const cm = &cpi->common;
1660
1661 av1_setup_skip_mode_allowed(cm);
1662 if (!cm->current_frame.skip_mode_info.skip_mode_allowed) return 0;
1663
1664 // Turn off skip mode if the temporal distances of the reference pair to the
1665 // current frame are different by more than 1 frame.
1666 const int cur_offset = (int)cm->current_frame.order_hint;
1667 int ref_offset[2];
1668 get_skip_mode_ref_offsets(cm, ref_offset);
1669 const int cur_to_ref0 = get_relative_dist(&cm->seq_params->order_hint_info,
1670 cur_offset, ref_offset[0]);
1671 const int cur_to_ref1 = abs(get_relative_dist(
1672 &cm->seq_params->order_hint_info, cur_offset, ref_offset[1]));
1673 if (abs(cur_to_ref0 - cur_to_ref1) > 1) return 0;
1674
1675 // High Latency: Turn off skip mode if all refs are fwd.
1676 if (cpi->all_one_sided_refs && cpi->oxcf.gf_cfg.lag_in_frames > 0) return 0;
1677
1678 const int ref_frame[2] = {
1679 cm->current_frame.skip_mode_info.ref_frame_idx_0 + LAST_FRAME,
1680 cm->current_frame.skip_mode_info.ref_frame_idx_1 + LAST_FRAME
1681 };
1682 if (!(cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame[0]]) ||
1683 !(cpi->ref_frame_flags & av1_ref_frame_flag_list[ref_frame[1]]))
1684 return 0;
1685
1686 return 1;
1687}
1688
1689static inline void set_default_interp_skip_flags(
1690 const AV1_COMMON *cm, InterpSearchFlags *interp_search_flags) {
1691 const int num_planes = av1_num_planes(cm);
1692 interp_search_flags->default_interp_skip_flags =
1693 (num_planes == 1) ? INTERP_SKIP_LUMA_EVAL_CHROMA
1694 : INTERP_SKIP_LUMA_SKIP_CHROMA;
1695}
1696
1697static inline void setup_prune_ref_frame_mask(AV1_COMP *cpi) {
1698 if ((!cpi->oxcf.ref_frm_cfg.enable_onesided_comp ||
1699 cpi->sf.inter_sf.disable_onesided_comp) &&
1700 cpi->all_one_sided_refs) {
1701 // Disable all compound references
1702 cpi->prune_ref_frame_mask = (1 << MODE_CTX_REF_FRAMES(REF_FRAMES + (FWD_REFS * BWD_REFS + TOTAL_UNIDIR_COMP_REFS))) - (1 << REF_FRAMES);
1703 } else if (!cpi->sf.rt_sf.use_nonrd_pick_mode &&
1704 cpi->sf.inter_sf.selective_ref_frame >= 2) {
1705 AV1_COMMON *const cm = &cpi->common;
1706 const int cur_frame_display_order_hint =
1707 cm->current_frame.display_order_hint;
1708 unsigned int *ref_display_order_hint =
1709 cm->cur_frame->ref_display_order_hint;
1710 const int arf2_dist = av1_encoder_get_relative_dist(
1711 ref_display_order_hint[ALTREF2_FRAME - LAST_FRAME],
1712 cur_frame_display_order_hint);
1713 const int bwd_dist = av1_encoder_get_relative_dist(
1714 ref_display_order_hint[BWDREF_FRAME - LAST_FRAME],
1715 cur_frame_display_order_hint);
1716
1717 for (int ref_idx = REF_FRAMES; ref_idx < MODE_CTX_REF_FRAMES(REF_FRAMES + (FWD_REFS * BWD_REFS + TOTAL_UNIDIR_COMP_REFS)); ++ref_idx) {
1718 MV_REFERENCE_FRAME rf[2];
1719 av1_set_ref_frame(rf, ref_idx);
1720 if (!(cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[0]]) ||
1721 !(cpi->ref_frame_flags & av1_ref_frame_flag_list[rf[1]])) {
1722 continue;
1723 }
1724
1725 if (!cpi->all_one_sided_refs) {
1726 int ref_dist[2];
1727 for (int i = 0; i < 2; ++i) {
1728 ref_dist[i] = av1_encoder_get_relative_dist(
1729 ref_display_order_hint[rf[i] - LAST_FRAME],
1730 cur_frame_display_order_hint);
1731 }
1732
1733 // One-sided compound is used only when all reference frames are
1734 // one-sided.
1735 if ((ref_dist[0] > 0) == (ref_dist[1] > 0)) {
1736 cpi->prune_ref_frame_mask |= 1 << ref_idx;
1737 }
1738 }
1739
1740 if (cpi->sf.inter_sf.selective_ref_frame >= 4 &&
1741 (rf[0] == ALTREF2_FRAME || rf[1] == ALTREF2_FRAME) &&
1742 (cpi->ref_frame_flags & av1_ref_frame_flag_list[BWDREF_FRAME])) {
1743 // Check if both ALTREF2_FRAME and BWDREF_FRAME are future references.
1744 if (arf2_dist > 0 && bwd_dist > 0 && bwd_dist <= arf2_dist) {
1745 // Drop ALTREF2_FRAME as a reference if BWDREF_FRAME is a closer
1746 // reference to the current frame than ALTREF2_FRAME
1747 cpi->prune_ref_frame_mask |= 1 << ref_idx;
1748 }
1749 }
1750 }
1751 }
1752}
1753
1754static int allow_deltaq_mode(AV1_COMP *cpi) {
1755#if !CONFIG_REALTIME_ONLY0
1756 AV1_COMMON *const cm = &cpi->common;
1757 BLOCK_SIZE sb_size = cm->seq_params->sb_size;
1758 int sbs_wide = mi_size_wide[sb_size];
1759 int sbs_high = mi_size_high[sb_size];
1760
1761 int64_t delta_rdcost = 0;
1762 for (int mi_row = 0; mi_row < cm->mi_params.mi_rows; mi_row += sbs_high) {
1763 for (int mi_col = 0; mi_col < cm->mi_params.mi_cols; mi_col += sbs_wide) {
1764 int64_t this_delta_rdcost = 0;
1765 av1_get_q_for_deltaq_objective(cpi, &cpi->td, &this_delta_rdcost, sb_size,
1766 mi_row, mi_col);
1767 delta_rdcost += this_delta_rdcost;
1768 }
1769 }
1770 return delta_rdcost < 0;
1771#else
1772 (void)cpi;
1773 return 1;
1774#endif // !CONFIG_REALTIME_ONLY
1775}
1776
1777#define FORCE_ZMV_SKIP_128X128_BLK_DIFF10000 10000
1778#define FORCE_ZMV_SKIP_MAX_PER_PIXEL_DIFF4 4
1779
1780// Populates block level thresholds for force zeromv-skip decision
1781static void populate_thresh_to_force_zeromv_skip(AV1_COMP *cpi) {
1782 if (cpi->sf.rt_sf.part_early_exit_zeromv == 0) return;
1783
1784 // Threshold for forcing zeromv-skip decision is as below:
1785 // For 128x128 blocks, threshold is 10000 and per pixel threshold is 0.6103.
1786 // For 64x64 blocks, threshold is 5000 and per pixel threshold is 1.221
1787 // allowing slightly higher error for smaller blocks.
1788 // Per Pixel Threshold of 64x64 block Area of 64x64 block 1 1
1789 // ------------------------------------=sqrt(---------------------)=sqrt(-)=-
1790 // Per Pixel Threshold of 128x128 block Area of 128x128 block 4 2
1791 // Thus, per pixel thresholds for blocks of size 32x32, 16x16,... can be
1792 // chosen as 2.442, 4.884,.... As the per pixel error tends to be higher for
1793 // small blocks, the same is clipped to 4.
1794 const unsigned int thresh_exit_128x128_part = FORCE_ZMV_SKIP_128X128_BLK_DIFF10000;
1795 const int num_128x128_pix =
1796 block_size_wide[BLOCK_128X128] * block_size_high[BLOCK_128X128];
1797
1798 for (BLOCK_SIZE bsize = BLOCK_4X4; bsize < BLOCK_SIZES_ALL; bsize++) {
1799 const int num_block_pix = block_size_wide[bsize] * block_size_high[bsize];
1800
1801 // Calculate the threshold for zeromv-skip decision based on area of the
1802 // partition
1803 unsigned int thresh_exit_part_blk =
1804 (unsigned int)(thresh_exit_128x128_part *
1805 sqrt((double)num_block_pix / num_128x128_pix) +
1806 0.5);
1807 thresh_exit_part_blk = AOMMIN((((thresh_exit_part_blk) < ((unsigned int)(4 * num_block_pix
))) ? (thresh_exit_part_blk) : ((unsigned int)(4 * num_block_pix
)))
1808 thresh_exit_part_blk,(((thresh_exit_part_blk) < ((unsigned int)(4 * num_block_pix
))) ? (thresh_exit_part_blk) : ((unsigned int)(4 * num_block_pix
)))
1809 (unsigned int)(FORCE_ZMV_SKIP_MAX_PER_PIXEL_DIFF * num_block_pix))(((thresh_exit_part_blk) < ((unsigned int)(4 * num_block_pix
))) ? (thresh_exit_part_blk) : ((unsigned int)(4 * num_block_pix
)))
;
1810 cpi->zeromv_skip_thresh_exit_part[bsize] = thresh_exit_part_blk;
1811 }
1812}
1813
// Release the intrabc hash-search scratch buffers (aom_free tolerates NULL).
static void free_block_hash_buffers(uint32_t *block_hash_values[2][2],
                                    int8_t *is_block_same[2][3]) {
  for (int k = 0; k < 2; ++k) {
    for (int j = 0; j < 2; ++j) aom_free(block_hash_values[k][j]);
    for (int j = 0; j < 3; ++j) aom_free(is_block_same[k][j]);
  }
}
1826
1827/*!\brief Determines delta_q_res value for Variance Boost modulation.
1828 */
/*!\brief Determines delta_q_res value for Variance Boost modulation.
 *
 * Signaling delta_q across superblocks costs bits, and that syntax overhead
 * grows proportionally as the base qindex rises (lower quality, smaller
 * payload). Meanwhile the per-SB qindex swing between the lowest and
 * highest-variance regions also grows with base qindex (roughly 8 steps near
 * QP 5 versus roughly 52 near QP 60). The best quality-per-bit trade-off is
 * therefore a very fine delta_q_res at low base qindexes (preserving tiny SB
 * deltas) that becomes progressively coarser as base qindex increases
 * (reducing total signaling overhead).
 */
static int aom_get_variance_boost_delta_q_res(int qindex) {
  if (qindex >= 160) return 8;
  if (qindex >= 120) return 4;
  if (qindex >= 80) return 2;
  return 1;
}
1863
1864#if !CONFIG_REALTIME_ONLY0
1865static float get_thresh_based_on_q(int qindex, int speed) {
1866 const float min_threshold_arr[2] = { 0.06f, 0.09f };
1867 const float max_threshold_arr[2] = { 0.10f, 0.13f };
1868
1869 const float min_thresh = min_threshold_arr[speed >= 3];
1870 const float max_thresh = max_threshold_arr[speed >= 3];
1871 const float thresh = min_thresh + (max_thresh - min_thresh) *
1872 ((float)MAXQ255 - (float)qindex) /
1873 (float)(MAXQ255 - MINQ0);
1874 return thresh;
1875}
1876
1877static int get_mv_err(MV cur_mv, MV ref_mv) {
1878 const MV diff = { cur_mv.row - ref_mv.row, cur_mv.col - ref_mv.col };
1879 const MV abs_diff = { abs(diff.row), abs(diff.col) };
1880 const int mv_err = (abs_diff.row + abs_diff.col);
1881 return mv_err;
1882}
1883
1884static void check_mv_err_and_update(MV cur_mv, MV ref_mv, int *best_mv_err) {
1885 const int mv_err = get_mv_err(cur_mv, ref_mv);
1886 *best_mv_err = AOMMIN(mv_err, *best_mv_err)(((mv_err) < (*best_mv_err)) ? (mv_err) : (*best_mv_err));
1887}
1888
// Returns 1 when (mi_row + row_offset, mi_col + col_offset) lies inside the
// [0, num_mi_rows) x [0, num_mi_cols) frame grid, 0 otherwise.
static int is_inside_frame_border(int mi_row, int mi_col, int row_offset,
                                  int col_offset, int num_mi_rows,
                                  int num_mi_cols) {
  const int row = mi_row + row_offset;
  const int col = mi_col + col_offset;
  return row >= 0 && row < num_mi_rows && col >= 0 && col < num_mi_cols;
}
1898
1899// Compute the minimum MV error between current MV and spatial MV predictors.
1900static int get_spatial_mvpred_err(AV1_COMMON *cm, TplParams *const tpl_data,
1901 int tpl_idx, int mi_row, int mi_col,
1902 int ref_idx, int_mv cur_mv, int allow_hp,
1903 int is_integer) {
1904 const TplDepFrame *tpl_frame = &tpl_data->tpl_frame[tpl_idx];
1905 TplDepStats *tpl_ptr = tpl_frame->tpl_stats_ptr;
1906 const uint8_t block_mis_log2 = tpl_data->tpl_stats_block_mis_log2;
1907
1908 int mv_err = INT32_MAX(2147483647);
1909 const int step = 1 << block_mis_log2;
1910 const int mv_pred_pos_in_mis[6][2] = {
1911 { -step, 0 }, { 0, -step }, { -step, step },
1912 { -step, -step }, { -2 * step, 0 }, { 0, -2 * step },
1913 };
1914
1915 for (int i = 0; i < 6; i++) {
1916 int row_offset = mv_pred_pos_in_mis[i][0];
1917 int col_offset = mv_pred_pos_in_mis[i][1];
1918 if (!is_inside_frame_border(mi_row, mi_col, row_offset, col_offset,
1919 tpl_frame->mi_rows, tpl_frame->mi_cols)) {
1920 continue;
1921 }
1922
1923 const TplDepStats *tpl_stats =
1924 &tpl_ptr[av1_tpl_ptr_pos(mi_row + row_offset, mi_col + col_offset,
1925 tpl_frame->stride, block_mis_log2)];
1926 int_mv this_refmv = tpl_stats->mv[ref_idx];
1927 lower_mv_precision(&this_refmv.as_mv, allow_hp, is_integer);
1928 check_mv_err_and_update(cur_mv.as_mv, this_refmv.as_mv, &mv_err);
1929 }
1930
1931 // Check MV error w.r.t. Global MV / Zero MV
1932 int_mv gm_mv = { 0 };
1933 if (cm->global_motion[ref_idx + LAST_FRAME].wmtype > TRANSLATION) {
1934 const BLOCK_SIZE bsize = convert_length_to_bsize(tpl_data->tpl_bsize_1d);
1935 gm_mv = gm_get_motion_vector(&cm->global_motion[ref_idx + LAST_FRAME],
1936 allow_hp, bsize, mi_col, mi_row, is_integer);
1937 }
1938 check_mv_err_and_update(cur_mv.as_mv, gm_mv.as_mv, &mv_err);
1939
1940 return mv_err;
1941}
1942
1943// Compute the minimum MV error between current MV and temporal MV predictors.
1944static int get_temporal_mvpred_err(AV1_COMMON *cm, int mi_row, int mi_col,
1945 int num_mi_rows, int num_mi_cols,
1946 int ref_idx, int_mv cur_mv, int allow_hp,
1947 int is_integer) {
1948 const RefCntBuffer *ref_buf = get_ref_frame_buf(cm, ref_idx + LAST_FRAME);
1949 if (ref_buf == NULL((void*)0)) return INT32_MAX(2147483647);
1950 int cur_to_ref_dist =
1951 get_relative_dist(&cm->seq_params->order_hint_info,
1952 cm->cur_frame->order_hint, ref_buf->order_hint);
1953
1954 int mv_err = INT32_MAX(2147483647);
1955 const int mv_pred_pos_in_mis[7][2] = {
1956 { 0, 0 }, { 0, 2 }, { 2, 0 }, { 2, 2 }, { 4, -2 }, { 4, 4 }, { 2, 4 },
1957 };
1958
1959 for (int i = 0; i < 7; i++) {
1960 int row_offset = mv_pred_pos_in_mis[i][0];
1961 int col_offset = mv_pred_pos_in_mis[i][1];
1962 if (!is_inside_frame_border(mi_row, mi_col, row_offset, col_offset,
1963 num_mi_rows, num_mi_cols)) {
1964 continue;
1965 }
1966 const TPL_MV_REF *ref_mvs =
1967 cm->tpl_mvs +
1968 ((mi_row + row_offset) >> 1) * (cm->mi_params.mi_stride >> 1) +
1969 ((mi_col + col_offset) >> 1);
1970 if (ref_mvs->mfmv0.as_int == INVALID_MV0x80008000) continue;
1971
1972 int_mv this_refmv;
1973 av1_get_mv_projection(&this_refmv.as_mv, ref_mvs->mfmv0.as_mv,
1974 cur_to_ref_dist, ref_mvs->ref_frame_offset);
1975 lower_mv_precision(&this_refmv.as_mv, allow_hp, is_integer);
1976 check_mv_err_and_update(cur_mv.as_mv, this_refmv.as_mv, &mv_err);
1977 }
1978
1979 return mv_err;
1980}
1981
1982// Determine whether to disable temporal MV prediction for the current frame
1983// based on TPL and motion field data. Temporal MV prediction is disabled if the
1984// reduction in MV error by including temporal MVs as MV predictors is small.
1985static void check_to_disable_ref_frame_mvs(AV1_COMP *cpi) {
1986 AV1_COMMON *cm = &cpi->common;
1987 if (!cm->features.allow_ref_frame_mvs || cpi->sf.hl_sf.ref_frame_mvs_lvl != 1)
1988 return;
1989
1990 const int tpl_idx = cpi->gf_frame_index;
1991 TplParams *const tpl_data = &cpi->ppi->tpl_data;
1992 if (!av1_tpl_stats_ready(tpl_data, tpl_idx)) return;
1993
1994 const SUBPEL_FORCE_STOP tpl_subpel_precision =
1995 cpi->sf.tpl_sf.subpel_force_stop;
1996 const int allow_high_precision_mv = tpl_subpel_precision == EIGHTH_PEL &&
1997 cm->features.allow_high_precision_mv;
1998 const int force_integer_mv = tpl_subpel_precision == FULL_PEL ||
1999 cm->features.cur_frame_force_integer_mv;
2000
2001 const TplDepFrame *tpl_frame = &tpl_data->tpl_frame[tpl_idx];
2002 TplDepStats *tpl_ptr = tpl_frame->tpl_stats_ptr;
2003 const uint8_t block_mis_log2 = tpl_data->tpl_stats_block_mis_log2;
2004 const int step = 1 << block_mis_log2;
2005
2006 uint64_t accum_spatial_mvpred_err = 0;
2007 uint64_t accum_best_err = 0;
2008
2009 for (int mi_row = 0; mi_row < tpl_frame->mi_rows; mi_row += step) {
2010 for (int mi_col = 0; mi_col < tpl_frame->mi_cols; mi_col += step) {
2011 TplDepStats *tpl_stats_ptr = &tpl_ptr[av1_tpl_ptr_pos(
2012 mi_row, mi_col, tpl_frame->stride, block_mis_log2)];
2013 const int cur_best_ref_idx = tpl_stats_ptr->ref_frame_index[0];
2014 if (cur_best_ref_idx == NONE_FRAME) continue;
2015
2016 int_mv cur_mv = tpl_stats_ptr->mv[cur_best_ref_idx];
2017 lower_mv_precision(&cur_mv.as_mv, allow_high_precision_mv,
2018 force_integer_mv);
2019
2020 const int cur_spatial_mvpred_err = get_spatial_mvpred_err(
2021 cm, tpl_data, tpl_idx, mi_row, mi_col, cur_best_ref_idx, cur_mv,
2022 allow_high_precision_mv, force_integer_mv);
2023
2024 const int cur_temporal_mvpred_err = get_temporal_mvpred_err(
2025 cm, mi_row, mi_col, tpl_frame->mi_rows, tpl_frame->mi_cols,
2026 cur_best_ref_idx, cur_mv, allow_high_precision_mv, force_integer_mv);
2027
2028 const int cur_best_err =
2029 AOMMIN(cur_spatial_mvpred_err, cur_temporal_mvpred_err)(((cur_spatial_mvpred_err) < (cur_temporal_mvpred_err)) ? (
cur_spatial_mvpred_err) : (cur_temporal_mvpred_err))
;
2030 accum_spatial_mvpred_err += cur_spatial_mvpred_err;
2031 accum_best_err += cur_best_err;
2032 }
2033 }
2034
2035 const float threshold =
2036 get_thresh_based_on_q(cm->quant_params.base_qindex, cpi->oxcf.speed);
2037 const float mv_err_reduction =
2038 (float)(accum_spatial_mvpred_err - accum_best_err);
2039
2040 if (mv_err_reduction <= threshold * accum_spatial_mvpred_err)
2041 cm->features.allow_ref_frame_mvs = 0;
2042}
2043#endif // !CONFIG_REALTIME_ONLY
2044
/*!\brief Encoder setup (only for the current frame), encoding, and
 * reconstruction for a single frame.
 *
 * Initializes per-frame encoder state (mode info, zeroed RD counts, intraBC
 * hash table, per-segment quantizers/lossless flags, delta-q configuration,
 * loop-filter deltas, motion field), runs the tile encoding pass (row-MT,
 * tile-MT or single-threaded), and finally updates the frame-level
 * probability models (tx type, OBMC, warped motion, interp filter) from the
 * counts collected during encoding.
 *
 * \ingroup high_level_algo
 */
2050static inline void encode_frame_internal(AV1_COMP *cpi) {
 2051 ThreadData *const td = &cpi->td;
 2052 MACROBLOCK *const x = &td->mb;
 2053 AV1_COMMON *const cm = &cpi->common;
 2054 CommonModeInfoParams *const mi_params = &cm->mi_params;
 2055 FeatureFlags *const features = &cm->features;
 2056 MACROBLOCKD *const xd = &x->e_mbd;
 2057 RD_COUNTS *const rdc = &cpi->td.rd_counts;
#if CONFIG_FPMT_TEST0
 2059 FrameProbInfo *const temp_frame_probs = &cpi->ppi->temp_frame_probs;
 2060 FrameProbInfo *const temp_frame_probs_simulation =
 2061 &cpi->ppi->temp_frame_probs_simulation;
#endif
 2063 FrameProbInfo *const frame_probs = &cpi->ppi->frame_probs;
 2064 IntraBCHashInfo *const intrabc_hash_info = &x->intrabc_hash_info;
 2065 MultiThreadInfo *const mt_info = &cpi->mt_info;
 2066 AV1EncRowMultiThreadInfo *const enc_row_mt = &mt_info->enc_row_mt;
 2067 const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 2068 const DELTAQ_MODE deltaq_mode = oxcf->q_cfg.deltaq_mode;
 2069 int i;
 2070
 2071 if (!cpi->sf.rt_sf.use_nonrd_pick_mode) {
 2072 mi_params->setup_mi(mi_params);
 2073 }
 2074
 2075 set_mi_offsets(mi_params, xd, 0, 0);
 2076
 // Zero the per-frame counters before this frame's encode accumulates
 // into them.
 2077 av1_zero(*td->counts)memset(&(*td->counts), 0, sizeof(*td->counts));
 2078 av1_zero(rdc->tx_type_used)memset(&(rdc->tx_type_used), 0, sizeof(rdc->tx_type_used
))
;
 2079 av1_zero(rdc->obmc_used)memset(&(rdc->obmc_used), 0, sizeof(rdc->obmc_used)
)
;
 2080 av1_zero(rdc->warped_used)memset(&(rdc->warped_used), 0, sizeof(rdc->warped_used
))
;
 2081 av1_zero(rdc->seg_tmp_pred_cost)memset(&(rdc->seg_tmp_pred_cost), 0, sizeof(rdc->seg_tmp_pred_cost
))
;
 2082
 2083 // Reset the flag.
 2084 cpi->intrabc_used = 0;
 2085 // Need to disable intrabc when superres is selected
 2086 if (av1_superres_scaled(cm)) {
 2087 features->allow_intrabc = 0;
 2088 }
 2089
 2090 features->allow_intrabc &= (oxcf->kf_cfg.enable_intrabc);
 2091
 // Prune warped motion at the frame level when its historical usage
 // probability for this update type falls below the speed-feature threshold.
 2092 if (features->allow_warped_motion &&
 2093 cpi->sf.inter_sf.prune_warped_prob_thresh > 0) {
 2094 const FRAME_UPDATE_TYPE update_type =
 2095 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
 2096 int warped_probability =
#if CONFIG_FPMT_TEST0
 2098 cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE
 2099 ? temp_frame_probs->warped_probs[update_type]
 2100 :
#endif // CONFIG_FPMT_TEST
 2102 frame_probs->warped_probs[update_type];
 2103 if (warped_probability < cpi->sf.inter_sf.prune_warped_prob_thresh)
 2104 features->allow_warped_motion = 0;
 2105 }
 2106
 // Build the block hash tables (2x2 up to superblock size) used by intraBC
 // hash-based motion estimation.
 2107 int hash_table_created = 0;
 2108 if (!is_stat_generation_stage(cpi) && av1_use_hash_me(cpi) &&
 2109 !cpi->sf.rt_sf.use_nonrd_pick_mode) {
 2110 // TODO(any): move this outside of the recoding loop to avoid recalculating
 2111 // the hash table.
 2112 // add to hash table
 2113 const int pic_width = cpi->source->y_crop_width;
 2114 const int pic_height = cpi->source->y_crop_height;
 2115 uint32_t *block_hash_values[2][2] = { { NULL((void*)0) } };
 2116 int8_t *is_block_same[2][3] = { { NULL((void*)0) } };
 2117 int k, j;
 2118 bool_Bool error = false0;
 2119
 2120 for (k = 0; k < 2 && !error; ++k) {
 2121 for (j = 0; j < 2; ++j) {
 2122 block_hash_values[k][j] = (uint32_t *)aom_malloc(
 2123 sizeof(*block_hash_values[0][0]) * pic_width * pic_height);
 2124 if (!block_hash_values[k][j]) {
 2125 error = true1;
 2126 break;
 2127 }
 2128 }
 2129
 2130 for (j = 0; j < 3 && !error; ++j) {
 2131 is_block_same[k][j] = (int8_t *)aom_malloc(
 2132 sizeof(*is_block_same[0][0]) * pic_width * pic_height);
 2133 if (!is_block_same[k][j]) error = true1;
 2134 }
 2135 }
 2136
 2137 av1_hash_table_init(intrabc_hash_info);
 // aom_internal_error() performs a longjmp, so on allocation failure
 // control does not fall through past this call.
 2138 if (error ||
 2139 !av1_hash_table_create(&intrabc_hash_info->intrabc_hash_table)) {
 2140 free_block_hash_buffers(block_hash_values, is_block_same);
 2141 aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
 2142 "Error allocating intrabc_hash_table and buffers");
 2143 }
 2144 hash_table_created = 1;
 2145 av1_generate_block_2x2_hash_value(intrabc_hash_info, cpi->source,
 2146 block_hash_values[0], is_block_same[0]);
 2147 // Hash data generated for screen contents is used for intraBC ME
 2148 const int min_alloc_size = block_size_wide[mi_params->mi_alloc_bsize];
 2149 const int max_sb_size =
 2150 (1 << (cm->seq_params->mib_size_log2 + MI_SIZE_LOG22));
 2151 int src_idx = 0;
 // Double-buffer the hash values: each size is derived from the previous
 // size's buffer, ping-ponging between src_idx and dst_idx.
 2152 for (int size = 4; size <= max_sb_size; size *= 2, src_idx = !src_idx) {
 2153 const int dst_idx = !src_idx;
 2154 av1_generate_block_hash_value(
 2155 intrabc_hash_info, cpi->source, size, block_hash_values[src_idx],
 2156 block_hash_values[dst_idx], is_block_same[src_idx],
 2157 is_block_same[dst_idx]);
 2158 if (size >= min_alloc_size) {
 2159 if (!av1_add_to_hash_map_by_row_with_precal_data(
 2160 &intrabc_hash_info->intrabc_hash_table,
 2161 block_hash_values[dst_idx], is_block_same[dst_idx][2],
 2162 pic_width, pic_height, size)) {
 2163 error = true1;
 2164 break;
 2165 }
 2166 }
 2167 }
 2168
 2169 free_block_hash_buffers(block_hash_values, is_block_same);
 2170
 2171 if (error) {
 2172 aom_internal_error(cm->error, AOM_CODEC_MEM_ERROR,
 2173 "Error adding data to intrabc_hash_table");
 2174 }
 2175 }
 2176
 // Derive per-segment qindex and the lossless flag; pick the trellis
 // optimization level per segment accordingly.
 2177 const CommonQuantParams *quant_params = &cm->quant_params;
 2178 for (i = 0; i < MAX_SEGMENTS8; ++i) {
 2179 const int qindex =
 2180 cm->seg.enabled ? av1_get_qindex(&cm->seg, i, quant_params->base_qindex)
 2181 : quant_params->base_qindex;
 2182 xd->lossless[i] =
 2183 qindex == 0 && quant_params->y_dc_delta_q == 0 &&
 2184 quant_params->u_dc_delta_q == 0 && quant_params->u_ac_delta_q == 0 &&
 2185 quant_params->v_dc_delta_q == 0 && quant_params->v_ac_delta_q == 0;
 2186 if (xd->lossless[i]) cpi->enc_seg.has_lossless_segment = 1;
 2187 xd->qindex[i] = qindex;
 2188 if (xd->lossless[i]) {
 2189 cpi->optimize_seg_arr[i] = NO_TRELLIS_OPT;
 2190 } else {
 2191 cpi->optimize_seg_arr[i] = cpi->sf.rd_sf.optimize_coefficients;
 2192 }
 2193 }
 2194 features->coded_lossless = is_coded_lossless(cm, xd);
 2195 features->all_lossless = features->coded_lossless && !av1_superres_scaled(cm);
 2196
 2197 // Fix delta q resolution for the moment
 2198
 2199 cm->delta_q_info.delta_q_res = 0;
 2200 if (cpi->use_ducky_encode) {
 2201 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_DUCKY_ENCODE4;
 2202 } else if (cpi->oxcf.q_cfg.aq_mode != CYCLIC_REFRESH_AQ &&
 2203 !cpi->roi.enabled) {
 2204 if (deltaq_mode == DELTA_Q_OBJECTIVE)
 2205 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_OBJECTIVE4;
 2206 else if (deltaq_mode == DELTA_Q_PERCEPTUAL)
 2207 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL4;
 2208 else if (deltaq_mode == DELTA_Q_PERCEPTUAL_AI)
 2209 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL4;
 2210 else if (deltaq_mode == DELTA_Q_USER_RATING_BASED)
 2211 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL4;
 2212 else if (deltaq_mode == DELTA_Q_HDR)
 2213 cm->delta_q_info.delta_q_res = DEFAULT_DELTA_Q_RES_PERCEPTUAL4;
 2214 else if (deltaq_mode == DELTA_Q_VARIANCE_BOOST)
 2215 cm->delta_q_info.delta_q_res =
 2216 aom_get_variance_boost_delta_q_res(quant_params->base_qindex);
 2217 // Set delta_q_present_flag before it is used for the first time
 2218 cm->delta_q_info.delta_lf_res = DEFAULT_DELTA_LF_RES2;
 2219 cm->delta_q_info.delta_q_present_flag = deltaq_mode != NO_DELTA_Q;
 2220
 2221 // Turn off cm->delta_q_info.delta_q_present_flag if objective delta_q
 2222 // is used for ineligible frames. That effectively will turn off row_mt
 2223 // usage. Note objective delta_q and tpl eligible frames are only altref
 2224 // frames currently.
 2225 const GF_GROUP *gf_group = &cpi->ppi->gf_group;
 2226 if (cm->delta_q_info.delta_q_present_flag) {
 2227 if (deltaq_mode == DELTA_Q_OBJECTIVE &&
 2228 gf_group->update_type[cpi->gf_frame_index] == LF_UPDATE)
 2229 cm->delta_q_info.delta_q_present_flag = 0;
 2230
 2231 if (deltaq_mode == DELTA_Q_OBJECTIVE &&
 2232 cm->delta_q_info.delta_q_present_flag) {
 2233 cm->delta_q_info.delta_q_present_flag &= allow_deltaq_mode(cpi);
 2234 }
 2235 }
 2236
 2237 // Reset delta_q_used flag
 2238 cpi->deltaq_used = 0;
 2239
 2240 cm->delta_q_info.delta_lf_present_flag =
 2241 cm->delta_q_info.delta_q_present_flag &&
 2242 oxcf->tool_cfg.enable_deltalf_mode;
 2243 cm->delta_q_info.delta_lf_multi = DEFAULT_DELTA_LF_MULTI0;
 2244
 2245 // update delta_q_present_flag and delta_lf_present_flag based on
 2246 // base_qindex
 2247 cm->delta_q_info.delta_q_present_flag &= quant_params->base_qindex > 0;
 2248 cm->delta_q_info.delta_lf_present_flag &= quant_params->base_qindex > 0;
 2249 } else if (cpi->cyclic_refresh->apply_cyclic_refresh ||
 2250 cpi->svc.number_temporal_layers == 1) {
 2251 cpi->cyclic_refresh->actual_num_seg1_blocks = 0;
 2252 cpi->cyclic_refresh->actual_num_seg2_blocks = 0;
 2253 }
 2254 cpi->rc.cnt_zeromv = 0;
 2255
 2256 av1_frame_init_quantizer(cpi);
 2257 init_encode_frame_mb_context(cpi);
 2258 set_default_interp_skip_flags(cm, &cpi->interp_search_flags);
 2259
 // Reuse the previous frame's segmentation map when available (single
 // spatial layer only); otherwise start without one.
 2260 if (cm->prev_frame && cm->prev_frame->seg.enabled &&
 2261 cpi->svc.number_spatial_layers == 1)
 2262 cm->last_frame_seg_map = cm->prev_frame->seg_map;
 2263 else
 2264 cm->last_frame_seg_map = NULL((void*)0);
 2265 if (features->allow_intrabc || features->coded_lossless) {
 2266 av1_set_default_ref_deltas(cm->lf.ref_deltas);
 2267 av1_set_default_mode_deltas(cm->lf.mode_deltas);
 2268 } else if (cm->prev_frame) {
 2269 memcpy(cm->lf.ref_deltas, cm->prev_frame->ref_deltas, REF_FRAMES);
 2270 memcpy(cm->lf.mode_deltas, cm->prev_frame->mode_deltas, MAX_MODE_LF_DELTAS2);
 2271 }
 2272 memcpy(cm->cur_frame->ref_deltas, cm->lf.ref_deltas, REF_FRAMES);
 2273 memcpy(cm->cur_frame->mode_deltas, cm->lf.mode_deltas, MAX_MODE_LF_DELTAS2);
 2274
 2275 cpi->all_one_sided_refs =
 2276 frame_is_intra_only(cm) ? 0 : refs_are_one_sided(cm);
 2277
 2278 cpi->prune_ref_frame_mask = 0;
 2279 // Figure out which ref frames can be skipped at frame level.
 2280 setup_prune_ref_frame_mask(cpi);
 2281
 2282 x->txfm_search_info.txb_split_count = 0;
#if CONFIG_SPEED_STATS0
 2284 x->txfm_search_info.tx_search_count = 0;
#endif // CONFIG_SPEED_STATS
 2286
#if !CONFIG_REALTIME_ONLY0
#if CONFIG_COLLECT_COMPONENT_TIMING0
 2289 start_timing(cpi, av1_compute_global_motion_time);
#endif
 2291 av1_compute_global_motion_facade(cpi);
#if CONFIG_COLLECT_COMPONENT_TIMING0
 2293 end_timing(cpi, av1_compute_global_motion_time);
#endif
#endif // !CONFIG_REALTIME_ONLY
 2296
#if CONFIG_COLLECT_COMPONENT_TIMING0
 2298 start_timing(cpi, av1_setup_motion_field_time);
#endif
 2300 av1_calculate_ref_frame_side(cm);
 2301
 // Motion field setup, then a TPL-based check that may disable temporal MV
 // prediction for this frame (see check_to_disable_ref_frame_mvs()).
 2302 features->allow_ref_frame_mvs &= !(cpi->sf.hl_sf.ref_frame_mvs_lvl == 2);
 2303 if (features->allow_ref_frame_mvs) av1_setup_motion_field(cm);
#if !CONFIG_REALTIME_ONLY0
 2305 check_to_disable_ref_frame_mvs(cpi);
#endif // !CONFIG_REALTIME_ONLY
 2307
#if CONFIG_COLLECT_COMPONENT_TIMING0
 2309 end_timing(cpi, av1_setup_motion_field_time);
#endif
 2311
 2312 cm->current_frame.skip_mode_info.skip_mode_flag =
 2313 check_skip_mode_enabled(cpi);
 2314
 2315 // Initialization of skip mode cost depends on the value of
 2316 // 'skip_mode_flag'. This initialization happens in the function
 2317 // av1_fill_mode_rates(), which is in turn called in
 2318 // av1_initialize_rd_consts(). Thus, av1_initialize_rd_consts()
 2319 // has to be called after 'skip_mode_flag' is initialized.
 2320 av1_initialize_rd_consts(cpi);
 2321 av1_set_sad_per_bit(cpi, &x->sadperbit, quant_params->base_qindex);
 2322 populate_thresh_to_force_zeromv_skip(cpi);
 2323
 // Choose the encode path: row multithreading, tile multithreading, or
 // single-threaded tile encoding.
 2324 enc_row_mt->sync_read_ptr = av1_row_mt_sync_read_dummy;
 2325 enc_row_mt->sync_write_ptr = av1_row_mt_sync_write_dummy;
 2326 mt_info->row_mt_enabled = 0;
 2327 mt_info->pack_bs_mt_enabled = AOMMIN(mt_info->num_mod_workers[MOD_PACK_BS],(((mt_info->num_mod_workers[MOD_PACK_BS]) < (cm->tiles
.cols * cm->tiles.rows)) ? (mt_info->num_mod_workers[MOD_PACK_BS
]) : (cm->tiles.cols * cm->tiles.rows))
 2328 cm->tiles.cols * cm->tiles.rows)(((mt_info->num_mod_workers[MOD_PACK_BS]) < (cm->tiles
.cols * cm->tiles.rows)) ? (mt_info->num_mod_workers[MOD_PACK_BS
]) : (cm->tiles.cols * cm->tiles.rows))
> 1;
 2329
 2330 if (oxcf->row_mt && (mt_info->num_workers > 1)) {
 2331 mt_info->row_mt_enabled = 1;
 2332 enc_row_mt->sync_read_ptr = av1_row_mt_sync_read;
 2333 enc_row_mt->sync_write_ptr = av1_row_mt_sync_write;
 2334 av1_encode_tiles_row_mt(cpi);
 2335 } else {
 2336 if (AOMMIN(mt_info->num_workers, cm->tiles.cols * cm->tiles.rows)(((mt_info->num_workers) < (cm->tiles.cols * cm->
tiles.rows)) ? (mt_info->num_workers) : (cm->tiles.cols
* cm->tiles.rows))
> 1) {
 2337 av1_encode_tiles_mt(cpi);
 2338 } else {
 2339 // Preallocate the pc_tree for realtime coding to reduce the cost of
 2340 // memory allocation.
 2341 const int use_nonrd_mode = cpi->sf.rt_sf.use_nonrd_pick_mode;
 2342 if (use_nonrd_mode) {
 2343 td->pc_root = av1_alloc_pc_tree_node(cm->seq_params->sb_size);
 2344 if (!td->pc_root)
 2345 aom_internal_error(xd->error_info, AOM_CODEC_MEM_ERROR,
 2346 "Failed to allocate PC_TREE");
 2347 } else {
 2348 td->pc_root = NULL((void*)0);
 2349 }
 2350
 2351 encode_tiles(cpi);
 2352 av1_free_pc_tree_recursive(td->pc_root, av1_num_planes(cm), 0, 0,
 2353 cpi->sf.part_sf.partition_search_type);
 2354 td->pc_root = NULL((void*)0);
 2355 }
 2356 }
 2357
 2358 // If intrabc is allowed but never selected, reset the allow_intrabc flag.
 2359 if (features->allow_intrabc && !cpi->intrabc_used) {
 2360 features->allow_intrabc = 0;
 2361 }
 2362 if (features->allow_intrabc) {
 2363 cm->delta_q_info.delta_lf_present_flag = 0;
 2364 }
 2365
 2366 if (cm->delta_q_info.delta_q_present_flag && cpi->deltaq_used == 0) {
 2367 cm->delta_q_info.delta_q_present_flag = 0;
 2368 }
 2369
 2370 // Set the transform size appropriately before bitstream creation
 2371 const MODE_EVAL_TYPE eval_type =
 2372 cpi->sf.winner_mode_sf.enable_winner_mode_for_tx_size_srch
 2373 ? WINNER_MODE_EVAL
 2374 : DEFAULT_EVAL;
 2375 const TX_SIZE_SEARCH_METHOD tx_search_type =
 2376 cpi->winner_mode_params.tx_size_search_methods[eval_type];
 2377 assert(oxcf->txfm_cfg.enable_tx64 || tx_search_type != USE_LARGESTALL)((void) sizeof ((oxcf->txfm_cfg.enable_tx64 || tx_search_type
!= USE_LARGESTALL) ? 1 : 0), __extension__ ({ if (oxcf->txfm_cfg
.enable_tx64 || tx_search_type != USE_LARGESTALL) ; else __assert_fail
("oxcf->txfm_cfg.enable_tx64 || tx_search_type != USE_LARGESTALL"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 2377, __extension__ __PRETTY_FUNCTION__); }))
;
 2378 features->tx_mode = select_tx_mode(cm, tx_search_type);
 2379
 2380 // Retain the frame level probability update conditions for parallel frames.
 2381 // These conditions will be consumed during postencode stage to update the
 2382 // probability.
 2383 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
 2384 cpi->do_update_frame_probs_txtype[cpi->num_frame_recode] =
 2385 cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats;
 2386 cpi->do_update_frame_probs_obmc[cpi->num_frame_recode] =
 2387 (cpi->sf.inter_sf.prune_obmc_prob_thresh > 0 &&
 2388 cpi->sf.inter_sf.prune_obmc_prob_thresh < INT_MAX2147483647);
 2389 cpi->do_update_frame_probs_warp[cpi->num_frame_recode] =
 2390 (features->allow_warped_motion &&
 2391 cpi->sf.inter_sf.prune_warped_prob_thresh > 0);
 2392 cpi->do_update_frame_probs_interpfilter[cpi->num_frame_recode] =
 2393 (cm->current_frame.frame_type != KEY_FRAME &&
 2394 cpi->sf.interp_sf.adaptive_interp_filter_search == 2 &&
 2395 features->interp_filter == SWITCHABLE);
 2396 }
 2397
 // Update transform-type probabilities from this frame's tx_type_used
 // counts (averaged with the running probabilities; the j == 0 entry
 // absorbs the rounding remainder so each row sums to MAX_TX_TYPE_PROB).
 2398 if (cpi->sf.tx_sf.tx_type_search.prune_tx_type_using_stats ||
 2399 ((cpi->sf.tx_sf.tx_type_search.fast_inter_tx_type_prob_thresh !=
 2400 INT_MAX2147483647) &&
 2401 (cpi->sf.tx_sf.tx_type_search.fast_inter_tx_type_prob_thresh != 0))) {
 2402 const FRAME_UPDATE_TYPE update_type =
 2403 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
 2404 for (i = 0; i < TX_SIZES_ALL; i++) {
 2405 int sum = 0;
 2406 int j;
 2407 int left = MAX_TX_TYPE_PROB1024;
 2408
 2409 for (j = 0; j < TX_TYPES; j++)
 2410 sum += cpi->td.rd_counts.tx_type_used[i][j];
 2411
 2412 for (j = TX_TYPES - 1; j >= 0; j--) {
 2413 int update_txtype_frameprobs = 1;
 2414 const int new_prob =
 2415 sum ? (int)((int64_t)MAX_TX_TYPE_PROB1024 *
 2416 cpi->td.rd_counts.tx_type_used[i][j] / sum)
 2417 : (j ? 0 : MAX_TX_TYPE_PROB1024);
#if CONFIG_FPMT_TEST0
 2419 if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
 2420 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] ==
 2421 0) {
 2422 int prob =
 2423 (temp_frame_probs_simulation->tx_type_probs[update_type][i][j] +
 2424 new_prob) >>
 2425 1;
 2426 left -= prob;
 2427 if (j == 0) prob += left;
 2428 temp_frame_probs_simulation->tx_type_probs[update_type][i][j] =
 2429 prob;
 2430 // Copy temp_frame_probs_simulation to temp_frame_probs
 2431 for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
 2432 update_type_idx++) {
 2433 temp_frame_probs->tx_type_probs[update_type_idx][i][j] =
 2434 temp_frame_probs_simulation
 2435 ->tx_type_probs[update_type_idx][i][j];
 2436 }
 2437 }
 2438 update_txtype_frameprobs = 0;
 2439 }
#endif // CONFIG_FPMT_TEST
 2441 // Track the frame probabilities of parallel encode frames to update
 2442 // during postencode stage.
 2443 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
 2444 update_txtype_frameprobs = 0;
 2445 cpi->frame_new_probs[cpi->num_frame_recode]
 2446 .tx_type_probs[update_type][i][j] = new_prob;
 2447 }
 2448 if (update_txtype_frameprobs) {
 2449 int prob =
 2450 (frame_probs->tx_type_probs[update_type][i][j] + new_prob) >> 1;
 2451 left -= prob;
 2452 if (j == 0) prob += left;
 2453 frame_probs->tx_type_probs[update_type][i][j] = prob;
 2454 }
 2455 }
 2456 }
 2457 }
 2458
 // Prefer temporal segmentation prediction only if it was the cheaper of
 // the two costs measured during encoding.
 2459 if (cm->seg.enabled) {
 2460 cm->seg.temporal_update = 1;
 2461 if (rdc->seg_tmp_pred_cost[0] < rdc->seg_tmp_pred_cost[1])
 2462 cm->seg.temporal_update = 0;
 2463 }
 2464
 // Update OBMC usage probabilities from this frame's obmc_used counts.
 2465 if (cpi->sf.inter_sf.prune_obmc_prob_thresh > 0 &&
 2466 cpi->sf.inter_sf.prune_obmc_prob_thresh < INT_MAX2147483647) {
 2467 const FRAME_UPDATE_TYPE update_type =
 2468 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
 2469
 2470 for (i = 0; i < BLOCK_SIZES_ALL; i++) {
 2471 int sum = 0;
 2472 int update_obmc_frameprobs = 1;
 2473 for (int j = 0; j < 2; j++) sum += cpi->td.rd_counts.obmc_used[i][j];
 2474
 2475 const int new_prob =
 2476 sum ? 128 * cpi->td.rd_counts.obmc_used[i][1] / sum : 0;
#if CONFIG_FPMT_TEST0
 2478 if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
 2479 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] == 0) {
 2480 temp_frame_probs_simulation->obmc_probs[update_type][i] =
 2481 (temp_frame_probs_simulation->obmc_probs[update_type][i] +
 2482 new_prob) >>
 2483 1;
 2484 // Copy temp_frame_probs_simulation to temp_frame_probs
 2485 for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
 2486 update_type_idx++) {
 2487 temp_frame_probs->obmc_probs[update_type_idx][i] =
 2488 temp_frame_probs_simulation->obmc_probs[update_type_idx][i];
 2489 }
 2490 }
 2491 update_obmc_frameprobs = 0;
 2492 }
#endif // CONFIG_FPMT_TEST
 2494 // Track the frame probabilities of parallel encode frames to update
 2495 // during postencode stage.
 2496 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
 2497 update_obmc_frameprobs = 0;
 2498 cpi->frame_new_probs[cpi->num_frame_recode].obmc_probs[update_type][i] =
 2499 new_prob;
 2500 }
 2501 if (update_obmc_frameprobs) {
 2502 frame_probs->obmc_probs[update_type][i] =
 2503 (frame_probs->obmc_probs[update_type][i] + new_prob) >> 1;
 2504 }
 2505 }
 2506 }
 2507
 // Update warped-motion usage probabilities from this frame's warped_used
 // counts.
 2508 if (features->allow_warped_motion &&
 2509 cpi->sf.inter_sf.prune_warped_prob_thresh > 0) {
 2510 const FRAME_UPDATE_TYPE update_type =
 2511 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
 2512 int update_warp_frameprobs = 1;
 2513 int sum = 0;
 2514 for (i = 0; i < 2; i++) sum += cpi->td.rd_counts.warped_used[i];
 2515 const int new_prob = sum ? 128 * cpi->td.rd_counts.warped_used[1] / sum : 0;
#if CONFIG_FPMT_TEST0
 2517 if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
 2518 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] == 0) {
 2519 temp_frame_probs_simulation->warped_probs[update_type] =
 2520 (temp_frame_probs_simulation->warped_probs[update_type] +
 2521 new_prob) >>
 2522 1;
 2523 // Copy temp_frame_probs_simulation to temp_frame_probs
 2524 for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
 2525 update_type_idx++) {
 2526 temp_frame_probs->warped_probs[update_type_idx] =
 2527 temp_frame_probs_simulation->warped_probs[update_type_idx];
 2528 }
 2529 }
 2530 update_warp_frameprobs = 0;
 2531 }
#endif // CONFIG_FPMT_TEST
 2533 // Track the frame probabilities of parallel encode frames to update
 2534 // during postencode stage.
 2535 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
 2536 update_warp_frameprobs = 0;
 2537 cpi->frame_new_probs[cpi->num_frame_recode].warped_probs[update_type] =
 2538 new_prob;
 2539 }
 2540 if (update_warp_frameprobs) {
 2541 frame_probs->warped_probs[update_type] =
 2542 (frame_probs->warped_probs[update_type] + new_prob) >> 1;
 2543 }
 2544 }
 2545
 // Update switchable interpolation filter probabilities from this frame's
 // switchable_interp counts (the j == 0 entry absorbs the rounding
 // remainder so each context sums to 1536).
 2546 if (cm->current_frame.frame_type != KEY_FRAME &&
 2547 cpi->sf.interp_sf.adaptive_interp_filter_search == 2 &&
 2548 features->interp_filter == SWITCHABLE) {
 2549 const FRAME_UPDATE_TYPE update_type =
 2550 get_frame_update_type(&cpi->ppi->gf_group, cpi->gf_frame_index);
 2551
 2552 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS((SWITCHABLE_FILTERS + 1) * 4); i++) {
 2553 int sum = 0;
 2554 int j;
 2555 int left = 1536;
 2556
 2557 for (j = 0; j < SWITCHABLE_FILTERS; j++) {
 2558 sum += cpi->td.counts->switchable_interp[i][j];
 2559 }
 2560
 2561 for (j = SWITCHABLE_FILTERS - 1; j >= 0; j--) {
 2562 int update_interpfilter_frameprobs = 1;
 2563 const int new_prob =
 2564 sum ? 1536 * cpi->td.counts->switchable_interp[i][j] / sum
 2565 : (j ? 0 : 1536);
#if CONFIG_FPMT_TEST0
 2567 if (cpi->ppi->fpmt_unit_test_cfg == PARALLEL_SIMULATION_ENCODE) {
 2568 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] ==
 2569 0) {
 2570 int prob = (temp_frame_probs_simulation
 2571 ->switchable_interp_probs[update_type][i][j] +
 2572 new_prob) >>
 2573 1;
 2574 left -= prob;
 2575 if (j == 0) prob += left;
 2576 temp_frame_probs_simulation
 2577 ->switchable_interp_probs[update_type][i][j] = prob;
 2578 // Copy temp_frame_probs_simulation to temp_frame_probs
 2579 for (int update_type_idx = 0; update_type_idx < FRAME_UPDATE_TYPES;
 2580 update_type_idx++) {
 2581 temp_frame_probs->switchable_interp_probs[update_type_idx][i][j] =
 2582 temp_frame_probs_simulation
 2583 ->switchable_interp_probs[update_type_idx][i][j];
 2584 }
 2585 }
 2586 update_interpfilter_frameprobs = 0;
 2587 }
#endif // CONFIG_FPMT_TEST
 2589 // Track the frame probabilities of parallel encode frames to update
 2590 // during postencode stage.
 2591 if (cpi->ppi->gf_group.frame_parallel_level[cpi->gf_frame_index] > 0) {
 2592 update_interpfilter_frameprobs = 0;
 2593 cpi->frame_new_probs[cpi->num_frame_recode]
 2594 .switchable_interp_probs[update_type][i][j] = new_prob;
 2595 }
 2596 if (update_interpfilter_frameprobs) {
 2597 int prob = (frame_probs->switchable_interp_probs[update_type][i][j] +
 2598 new_prob) >>
 2599 1;
 2600 left -= prob;
 2601 if (j == 0) prob += left;
 2602 frame_probs->switchable_interp_probs[update_type][i][j] = prob;
 2603 }
 2604 }
 2605 }
 2606 }
 // Release the intraBC hash table allocated earlier in this function.
 2607 if (hash_table_created) {
 2608 av1_hash_table_destroy(&intrabc_hash_info->intrabc_hash_table);
 2609 }
 2610}
2611
2612/*!\brief Setup reference frame buffers and encode a frame
2613 *
2614 * \ingroup high_level_algo
2615 * \callgraph
2616 * \callergraph
2617 *
2618 * \param[in] cpi Top-level encoder structure
2619 */
2620void av1_encode_frame(AV1_COMP *cpi) {
2621 AV1_COMMON *const cm = &cpi->common;
2622 CurrentFrame *const current_frame = &cm->current_frame;
2623 FeatureFlags *const features = &cm->features;
2624 RD_COUNTS *const rdc = &cpi->td.rd_counts;
2625 const AV1EncoderConfig *const oxcf = &cpi->oxcf;
2626 // Indicates whether or not to use a default reduced set for ext-tx
2627 // rather than the potential full set of 16 transforms
2628 features->reduced_tx_set_used = oxcf->txfm_cfg.reduced_tx_type_set;
2629
2630 // Make sure segment_id is no larger than last_active_segid.
2631 if (cm->seg.enabled && cm->seg.update_map) {
2632 const int mi_rows = cm->mi_params.mi_rows;
2633 const int mi_cols = cm->mi_params.mi_cols;
2634 const int last_active_segid = cm->seg.last_active_segid;
2635 uint8_t *map = cpi->enc_seg.map;
2636 for (int mi_row = 0; mi_row < mi_rows; ++mi_row) {
2637 for (int mi_col = 0; mi_col < mi_cols; ++mi_col) {
2638 map[mi_col] = AOMMIN(map[mi_col], last_active_segid)(((map[mi_col]) < (last_active_segid)) ? (map[mi_col]) : (
last_active_segid))
;
2639 }
2640 map += mi_cols;
2641 }
2642 }
2643
2644 av1_setup_frame_buf_refs(cm);
2645 enforce_max_ref_frames(cpi, &cpi->ref_frame_flags,
2646 cm->cur_frame->ref_display_order_hint,
2647 cm->current_frame.display_order_hint);
2648 set_rel_frame_dist(&cpi->common, &cpi->ref_frame_dist_info,
2649 cpi->ref_frame_flags);
2650 av1_setup_frame_sign_bias(cm);
2651
2652 // If global motion is enabled, then every buffer which is used as either
2653 // a source or a ref frame should have an image pyramid allocated.
2654 // Check here so that issues can be caught early in debug mode
2655#if !defined(NDEBUG) && !CONFIG_REALTIME_ONLY0
2656 if (cpi->alloc_pyramid) {
2657 assert(cpi->source->y_pyramid)((void) sizeof ((cpi->source->y_pyramid) ? 1 : 0), __extension__
({ if (cpi->source->y_pyramid) ; else __assert_fail ("cpi->source->y_pyramid"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 2657, __extension__ __PRETTY_FUNCTION__); }))
;
2658 for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
2659 const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame);
2660 if (buf != NULL((void*)0)) {
2661 assert(buf->buf.y_pyramid)((void) sizeof ((buf->buf.y_pyramid) ? 1 : 0), __extension__
({ if (buf->buf.y_pyramid) ; else __assert_fail ("buf->buf.y_pyramid"
, "/root/firefox-clang/third_party/aom/av1/encoder/encodeframe.c"
, 2661, __extension__ __PRETTY_FUNCTION__); }))
;
2662 }
2663 }
2664 }
2665#endif // !defined(NDEBUG) && !CONFIG_REALTIME_ONLY
2666
2667#if CONFIG_MISMATCH_DEBUG0
2668 mismatch_reset_frame(av1_num_planes(cm));
2669#endif
2670
2671 rdc->newmv_or_intra_blocks = 0;
2672 cpi->palette_pixel_num = 0;
2673
2674 if (cpi->sf.hl_sf.frame_parameter_update ||
2675 cpi->sf.rt_sf.use_comp_ref_nonrd) {
2676 if (frame_is_intra_only(cm))
2677 current_frame->reference_mode = SINGLE_REFERENCE;
2678 else
2679 current_frame->reference_mode = REFERENCE_MODE_SELECT;
2680
2681 features->interp_filter = SWITCHABLE;
2682 if (cm->tiles.large_scale) features->interp_filter = EIGHTTAP_REGULAR;
2683
2684 features->switchable_motion_mode = is_switchable_motion_mode_allowed(
2685 features->allow_warped_motion, oxcf->motion_mode_cfg.enable_obmc);
2686
2687 rdc->compound_ref_used_flag = 0;
2688 rdc->skip_mode_used_flag = 0;
2689
2690 encode_frame_internal(cpi);
2691
2692 if (current_frame->reference_mode == REFERENCE_MODE_SELECT) {
2693 // Use a flag that includes 4x4 blocks
2694 if (rdc->compound_ref_used_flag == 0) {
2695 current_frame->reference_mode = SINGLE_REFERENCE;
2696#if CONFIG_ENTROPY_STATS0
2697 av1_zero(cpi->td.counts->comp_inter)memset(&(cpi->td.counts->comp_inter), 0, sizeof(cpi
->td.counts->comp_inter))
;
2698#endif // CONFIG_ENTROPY_STATS
2699 }
2700 }
2701 // Re-check on the skip mode status as reference mode may have been
2702 // changed.
2703 SkipModeInfo *const skip_mode_info = &current_frame->skip_mode_info;
2704 if (frame_is_intra_only(cm) ||
2705 current_frame->reference_mode == SINGLE_REFERENCE) {
2706 skip_mode_info->skip_mode_allowed = 0;
2707 skip_mode_info->skip_mode_flag = 0;
2708 }
2709 if (skip_mode_info->skip_mode_flag && rdc->skip_mode_used_flag == 0)
2710 skip_mode_info->skip_mode_flag = 0;
2711
2712 if (!cm->tiles.large_scale) {
2713 if (features->tx_mode == TX_MODE_SELECT &&
2714 cpi->td.mb.txfm_search_info.txb_split_count == 0)
2715 features->tx_mode = TX_MODE_LARGEST;
2716 }
2717 } else {
2718 // This is needed if real-time speed setting is changed on the fly
2719 // from one using compound prediction to one using single reference.
2720 if (current_frame->reference_mode == REFERENCE_MODE_SELECT)
2721 current_frame->reference_mode = SINGLE_REFERENCE;
2722 encode_frame_internal(cpi);
2723 }
2724}