// Copyright 2019 Joe Drago. All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause

#include "avif/avif.h"

#include <string.h>

typedef struct avifReformatState
{
    // YUV coefficients
    float kr;
    float kg;
    float kb;

    avifPixelFormatInfo formatInfo;
    avifBool usesU16;
} avifReformatState;

static avifBool avifPrepareReformatState(avifImage * image, avifReformatState * state)
{
    if (image->yuvFormat == AVIF_PIXEL_FORMAT_NONE) {
        return AVIF_FALSE;
    }
    avifGetPixelFormatInfo(image->yuvFormat, &state->formatInfo);

    // TODO: calculate coefficients
    state->kr = 0.2126f;
    state->kb = 0.0722f;
    state->kg = 1.0f - state->kr - state->kb;

    state->usesU16 = avifImageUsesU16(image);
    return AVIF_TRUE;
}
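
// Note: the TODO above hardcodes BT.709 luma coefficients (kr = 0.2126, kb = 0.0722).
// A minimal sketch (disabled) of how coefficient selection could look once colorimetry
// is plumbed through to this state; the integer "matrix" tags below are purely
// illustrative and are not part of avif.h.
#if 0
static void avifPickCoefficientsSketch(int matrix, avifReformatState * state)
{
    switch (matrix) {
        case 601:  state->kr = 0.299f;  state->kb = 0.114f;  break; // BT.601
        case 2020: state->kr = 0.2627f; state->kb = 0.0593f; break; // BT.2020 (non-constant luminance)
        case 709:
        default:   state->kr = 0.2126f; state->kb = 0.0722f; break; // BT.709
    }
    state->kg = 1.0f - state->kr - state->kb;
}
#endif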

avifResult avifImageRGBToYUV(avifImage * image)
{
    if (!image->rgbPlanes[AVIF_CHAN_R] || !image->rgbPlanes[AVIF_CHAN_G] || !image->rgbPlanes[AVIF_CHAN_B]) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifReformatState state;
    if (!avifPrepareReformatState(image, &state)) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifImageAllocatePlanes(image, AVIF_PLANES_YUV);

    const float kr = state.kr;
    const float kg = state.kg;
    const float kb = state.kb;

    float yuvPixel[3];
    float rgbPixel[3];
    int yuvUNorm[3];
    float maxChannel = (float)((1 << image->depth) - 1);
    for (int j = 0; j < image->height; ++j) {
        for (int i = 0; i < image->width; ++i) {
            // Unpack RGB into normalized float
            if (state.usesU16) {
                rgbPixel[0] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_R])])) / maxChannel;
                rgbPixel[1] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_G])])) / maxChannel;
                rgbPixel[2] = *((uint16_t *)(&image->rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_B])])) / maxChannel;
            } else {
                rgbPixel[0] = image->rgbPlanes[AVIF_CHAN_R][i + (j * image->rgbRowBytes[AVIF_CHAN_R])] / maxChannel;
                rgbPixel[1] = image->rgbPlanes[AVIF_CHAN_G][i + (j * image->rgbRowBytes[AVIF_CHAN_G])] / maxChannel;
                rgbPixel[2] = image->rgbPlanes[AVIF_CHAN_B][i + (j * image->rgbRowBytes[AVIF_CHAN_B])] / maxChannel;
            }

            // RGB -> YUV conversion
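            // (Y is the kr/kg/kb-weighted sum of the normalized channels; Cb and Cr are the
            // scaled offsets of B and R from Y, each landing in [-0.5, 0.5] before the bias below.)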
            float Y = (kr * rgbPixel[0]) + (kg * rgbPixel[1]) + (kb * rgbPixel[2]);
            yuvPixel[0] = Y;
            yuvPixel[1] = (rgbPixel[2] - Y) / (2 * (1 - kb));
            yuvPixel[2] = (rgbPixel[0] - Y) / (2 * (1 - kr));

            // Stuff YUV into unorm color layer
            yuvPixel[0] = AVIF_CLAMP(yuvPixel[0], 0.0f, 1.0f);
            yuvPixel[1] += 0.5f;
            yuvPixel[1] = AVIF_CLAMP(yuvPixel[1], 0.0f, 1.0f);
            yuvPixel[2] += 0.5f;
            yuvPixel[2] = AVIF_CLAMP(yuvPixel[2], 0.0f, 1.0f);
            yuvUNorm[0] = (int)avifRoundf(yuvPixel[0] * maxChannel);
            yuvUNorm[1] = (int)avifRoundf(yuvPixel[1] * maxChannel);
            yuvUNorm[2] = (int)avifRoundf(yuvPixel[2] * maxChannel);

            // adjust for limited/full color range, if need be
            if (image->yuvRange == AVIF_RANGE_LIMITED) {
                yuvUNorm[0] = avifFullToLimited(image->depth, yuvUNorm[0]);
                yuvUNorm[1] = avifFullToLimited(image->depth, yuvUNorm[1]);
                yuvUNorm[2] = avifFullToLimited(image->depth, yuvUNorm[2]);
            }

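            // With a subsampled format, several RGB pixels map to the same chroma sample;
            // each one simply overwrites it here (last pixel wins, no averaging).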
            int uvI = i >> state.formatInfo.chromaShiftX;
            int uvJ = j >> state.formatInfo.chromaShiftY;
            if (state.usesU16) {
                uint16_t * pY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * image->yuvRowBytes[AVIF_CHAN_Y])];
                *pY = (uint16_t)yuvUNorm[0];
                uint16_t * pU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
                *pU = (uint16_t)yuvUNorm[1];
                uint16_t * pV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
                *pV = (uint16_t)yuvUNorm[2];
            } else {
                image->yuvPlanes[AVIF_CHAN_Y][i + (j * image->yuvRowBytes[AVIF_CHAN_Y])] = (uint8_t)yuvUNorm[0];
                image->yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])] = (uint8_t)yuvUNorm[1];
                image->yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])] = (uint8_t)yuvUNorm[2];
            }
        }
    }

    return AVIF_RESULT_OK;
}
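
// A minimal round-trip sketch (disabled), assuming the public avifImageCreate() /
// avifImageDestroy() helpers from avif.h; it fills solid-red RGB planes, converts to
// YUV, and converts back, which overwrites the RGB planes from the freshly written YUV.
#if 0
static void avifReformatRoundTripSketch(void)
{
    avifImage * image = avifImageCreate(64, 64, 8, AVIF_PIXEL_FORMAT_YUV420);
    avifImageAllocatePlanes(image, AVIF_PLANES_RGB);
    memset(image->rgbPlanes[AVIF_CHAN_R], 255, (size_t)image->rgbRowBytes[AVIF_CHAN_R] * image->height);
    memset(image->rgbPlanes[AVIF_CHAN_G], 0, (size_t)image->rgbRowBytes[AVIF_CHAN_G] * image->height);
    memset(image->rgbPlanes[AVIF_CHAN_B], 0, (size_t)image->rgbRowBytes[AVIF_CHAN_B] * image->height);
    if (avifImageRGBToYUV(image) == AVIF_RESULT_OK) {
        avifImageYUVToRGB(image);
    }
    avifImageDestroy(image);
}
#endif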

avifResult avifImageYUVToRGB(avifImage * image)
{
    if (!image->yuvPlanes[AVIF_CHAN_Y] || !image->yuvPlanes[AVIF_CHAN_U] || !image->yuvPlanes[AVIF_CHAN_V]) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifReformatState state;
    if (!avifPrepareReformatState(image, &state)) {
        return AVIF_RESULT_REFORMAT_FAILED;
    }

    avifImageAllocatePlanes(image, AVIF_PLANES_RGB);

    const float kr = state.kr;
    const float kg = state.kg;
    const float kb = state.kb;

    int yuvUNorm[3];
    float yuvPixel[3];
    float rgbPixel[3];
    float maxChannel = (float)((1 << image->depth) - 1);
    for (int j = 0; j < image->height; ++j) {
        for (int i = 0; i < image->width; ++i) {
            // Unpack YUV into unorm
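            // (The same chroma sample is re-read for every RGB pixel it covers when the
            // format is subsampled: nearest-neighbor chroma upsampling, no interpolation.)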
            int uvI = i >> state.formatInfo.chromaShiftX;
            int uvJ = j >> state.formatInfo.chromaShiftY;
            if (state.usesU16) {
                yuvUNorm[0] = *((uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(i * 2) + (j * image->yuvRowBytes[AVIF_CHAN_Y])]);
                yuvUNorm[1] = *((uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])]);
                yuvUNorm[2] = *((uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvI * 2) + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])]);
            } else {
                yuvUNorm[0] = image->yuvPlanes[AVIF_CHAN_Y][i + (j * image->yuvRowBytes[AVIF_CHAN_Y])];
                yuvUNorm[1] = image->yuvPlanes[AVIF_CHAN_U][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
                yuvUNorm[2] = image->yuvPlanes[AVIF_CHAN_V][uvI + (uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
            }

            // adjust for limited/full color range, if need be
            if (image->yuvRange == AVIF_RANGE_LIMITED) {
                yuvUNorm[0] = avifLimitedToFull(image->depth, yuvUNorm[0]);
                yuvUNorm[1] = avifLimitedToFull(image->depth, yuvUNorm[1]);
                yuvUNorm[2] = avifLimitedToFull(image->depth, yuvUNorm[2]);
            }

            // Convert unorm to float
            yuvPixel[0] = yuvUNorm[0] / maxChannel;
            yuvPixel[1] = yuvUNorm[1] / maxChannel;
            yuvPixel[2] = yuvUNorm[2] / maxChannel;
            yuvPixel[1] -= 0.5f;
            yuvPixel[2] -= 0.5f;

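            // Invert the forward transform: R and B come straight from Cr and Cb, and since
            // Y = kr*R + kg*G + kb*B with kr + kg + kb == 1, G = (Y - kr*R - kb*B) / kg,
            // which expands to the expression below.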
            float Y = yuvPixel[0];
            float Cb = yuvPixel[1];
            float Cr = yuvPixel[2];

            float R = Y + (2 * (1 - kr)) * Cr;
            float B = Y + (2 * (1 - kb)) * Cb;
            float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);

            rgbPixel[0] = AVIF_CLAMP(R, 0.0f, 1.0f);
            rgbPixel[1] = AVIF_CLAMP(G, 0.0f, 1.0f);
            rgbPixel[2] = AVIF_CLAMP(B, 0.0f, 1.0f);

            if (state.usesU16) {
                uint16_t * pR = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_R][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_R])];
                *pR = (uint16_t)avifRoundf(rgbPixel[0] * maxChannel);
                uint16_t * pG = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_G][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_G])];
                *pG = (uint16_t)avifRoundf(rgbPixel[1] * maxChannel);
                uint16_t * pB = (uint16_t *)&image->rgbPlanes[AVIF_CHAN_B][(i * 2) + (j * image->rgbRowBytes[AVIF_CHAN_B])];
                *pB = (uint16_t)avifRoundf(rgbPixel[2] * maxChannel);
            } else {
                image->rgbPlanes[AVIF_CHAN_R][i + (j * image->rgbRowBytes[AVIF_CHAN_R])] = (uint8_t)avifRoundf(rgbPixel[0] * maxChannel);
                image->rgbPlanes[AVIF_CHAN_G][i + (j * image->rgbRowBytes[AVIF_CHAN_G])] = (uint8_t)avifRoundf(rgbPixel[1] * maxChannel);
                image->rgbPlanes[AVIF_CHAN_B][i + (j * image->rgbRowBytes[AVIF_CHAN_B])] = (uint8_t)avifRoundf(rgbPixel[2] * maxChannel);
            }
        }
    }
    return AVIF_RESULT_OK;
}
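
// avifLimitedToFull() / avifFullToLimited() map between full-range code values
// ([0, 2^depth - 1]) and the limited/video-range luma code values (8-bit: [16, 235],
// 10-bit: [64, 940], 12-bit: [256, 3760]), clamping to the destination range. Note that
// this file currently applies the luma range to the chroma planes as well.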
int avifLimitedToFull(int depth, int v)
{
    switch (depth) {
        case 8:
            v = ((v - 16) * 255) / (235 - 16);
            v = AVIF_CLAMP(v, 0, 255);
            return v;
        case 10:
            v = ((v - 64) * 1023) / (940 - 64);
            v = AVIF_CLAMP(v, 0, 1023);
            return v;
        case 12:
            v = ((v - 256) * 4095) / (3760 - 256);
            v = AVIF_CLAMP(v, 0, 4095);
            return v;
    }
    return v;
}

int avifFullToLimited(int depth, int v)
{
    switch (depth) {
        case 8:
            v = ((v * (235 - 16)) / 255) + 16;
            v = AVIF_CLAMP(v, 16, 235);
            return v;
        case 10:
            v = ((v * (940 - 64)) / 1023) + 64;
            v = AVIF_CLAMP(v, 64, 940);
            return v;
        case 12:
            v = ((v * (3760 - 256)) / 4095) + 256;
            v = AVIF_CLAMP(v, 256, 3760);
            return v;
    }
    return v;
}

#if 0
// debug code for limited/full charting
for (int v = 0; v < 4096; ++v) {
    int limited8 = avifFullToLimited(8, v);
    int full8 = avifLimitedToFull(8, limited8);
    int limited10 = avifFullToLimited(10, v);
    int full10 = avifLimitedToFull(10, limited10);
    int limited12 = avifFullToLimited(12, v);
    int full12 = avifLimitedToFull(12, limited12);
    printf("Code %d: [ 8bit Limited %d -> Full %d ] [10bit Limited %d -> Full %d ] [12bit Limited %d -> Full %d ]\n",
           v,
           limited8, full8,
           limited10, full10,
           limited12, full12);
}
#endif