Correct signed shift mistake in NnpredictTest
In the test suite for the neural network predictor I used (1 << 31) when
I meant to use (1u << 31) when generating random test data, resulting in
INT_MIN (the signed shift overflows, which is undefined behavior) rather
than UINT_MAX/2. This is of no significance to the tests
(it simply flips the sign of the random data) but could be confusing to
future readers.
Change-Id: I7853934da441512347fc9a831a01cc20ee29ad77
diff --git a/test/av1_nn_predict_test.cc b/test/av1_nn_predict_test.cc
index a193ab3..4a36428 100644
--- a/test/av1_nn_predict_test.cc
+++ b/test/av1_nn_predict_test.cc
@@ -102,25 +102,25 @@
for (int iter = 0; iter < 10000 && !HasFatalFailure(); ++iter) {
for (int node = 0; node < shape->num_inputs; node++) {
- inputs[node] = ((float)rng_.Rand31() - (1 << 30)) / (1 << 31);
+ inputs[node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
}
for (int layer = 0; layer < shape->num_hidden_layers; layer++) {
for (int node = 0; node < NN_MAX_NODES_PER_LAYER; node++) {
- bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1 << 31);
+ bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
}
for (int node = 0; node < NN_MAX_NODES_PER_LAYER * NN_MAX_NODES_PER_LAYER;
node++) {
- weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1 << 31);
+ weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
}
}
// Now the outputs:
int layer = shape->num_hidden_layers;
for (int node = 0; node < NN_MAX_NODES_PER_LAYER; node++) {
- bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1 << 31);
+ bias[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
}
for (int node = 0; node < NN_MAX_NODES_PER_LAYER * NN_MAX_NODES_PER_LAYER;
node++) {
- weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1 << 31);
+ weights[layer][node] = ((float)rng_.Rand31() - (1 << 30)) / (1u << 31);
}
av1_nn_predict_c(inputs, &nn_config, outputs_ref);