From 6dc38d71f74affa9c7892c149c93b842c7c56ccb Mon Sep 17 00:00:00 2001
From: Ivailo Monev
Date: Fri, 20 Mar 2020 23:49:11 +0000
Subject: [PATCH] move misc drawing helper functions to the top of qdrawhelper source

Signed-off-by: Ivailo Monev
---
 src/gui/painting/qdrawhelper.cpp | 1714 +++++++++++++++----------------
 src/gui/painting/qdrawhelper_p.h |   30 +-
 2 files changed, 865 insertions(+), 879 deletions(-)

diff --git a/src/gui/painting/qdrawhelper.cpp b/src/gui/painting/qdrawhelper.cpp
index 9955348fc..aa5ac1d5b 100644
--- a/src/gui/painting/qdrawhelper.cpp
+++ b/src/gui/painting/qdrawhelper.cpp
@@ -41,6 +41,863 @@ QT_BEGIN_NAMESPACE
 // must be multiple of 4 for easier SIMD implementations
 static const int buffer_size = 2048;
 
+static inline QRgb qConvertRgb16To32(uint c)
+{
+    return 0xff000000
+        | ((((c) << 3) & 0xf8) | (((c) >> 2) & 0x7))
+        | ((((c) << 5) & 0xfc00) | (((c) >> 1) & 0x300))
+        | ((((c) << 8) & 0xf80000) | (((c) << 3) & 0x70000));
+}
+
+template <class DST, class SRC>
+inline void madd_2(DST *dest, const quint16 alpha, const SRC *src)
+{
+    Q_ASSERT((quintptr(dest) & 0x3) == 0);
+    Q_ASSERT((quintptr(src) & 0x3) == 0);
+    dest[0] = dest[0].byte_mul(alpha >> 8) + DST(src[0]);
+    dest[1] = dest[1].byte_mul(alpha & 0xff) + DST(src[1]);
+}
+
+template <class DST, class SRC>
+inline void madd_4(DST *dest, const quint32 alpha, const SRC *src)
+{
+    Q_ASSERT((quintptr(dest) & 0x3) == 0);
+    Q_ASSERT((quintptr(src) & 0x3) == 0);
+    dest[0] = dest[0].byte_mul(alpha >> 24) + DST(src[0]);
+    dest[1] = dest[1].byte_mul((alpha >> 16) & 0xff) + DST(src[1]);
+    dest[2] = dest[2].byte_mul((alpha >> 8) & 0xff) + DST(src[2]);
+    dest[3] = dest[3].byte_mul(alpha & 0xff) + DST(src[3]);
+}
+
+#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
+template <>
+inline void madd_4(qargb8565 *dest, const quint32 a, const qargb8565 *src)
+{
+    Q_ASSERT((quintptr(dest) & 0x3) == 0);
+    Q_ASSERT((quintptr(src) & 0x3) == 0);
+
+    const quint32 *src32 = reinterpret_cast<const quint32*>(src);
+    quint32 *dest32 = reinterpret_cast<quint32*>(dest);
+    quint32 x, y, t;
+    quint8 a8;
+
+    {
+        x = dest32[0];
+        y = src32[0];
+
+        a8 = a >> 24;
+
+        // a0,g0
+        t = ((((x & 0x0007e0ff) * a8) >> 5) & 0x0007e0ff) + (y & 0x0007c0f8);
+
+        // r0,b0
+        t |= ((((x & 0x00f81f00) * a8) >> 5) & 0x00f81f00) + (y & 0x00f81f00);
+
+        a8 = (a >> 16) & 0xff;
+
+        // a1
+        t |= ((((x & 0xff000000) >> 5) * a8) & 0xff000000) + (y & 0xf8000000);
+
+        dest32[0] = t;
+    }
+    {
+        x = dest32[1];
+        y = src32[1];
+
+        // r1,b1
+        t = ((((x & 0x0000f81f) * a8) >> 5) & 0x0000f81f) + (y & 0x0000f81f);
+
+        // g1
+        t |= ((((x & 0x000007e0) * a8) >> 5) & 0x000007e0) + (y & 0x000007c0);
+
+        a8 = (a >> 8) & 0xff;
+
+        // a2
+        t |= ((((x & 0x00ff0000) * a8) >> 5) & 0x00ff0000) + (y & 0x00f80000);
+
+        {
+            // rgb2
+            quint16 x16 = (x >> 24) | ((dest32[2] & 0x000000ff) << 8);
+            quint16 y16 = (y >> 24) | ((src32[2] & 0x000000ff) << 8);
+            quint16 t16;
+
+            t16 = ((((x16 & 0xf81f) * a8) >> 5) & 0xf81f) + (y16 & 0xf81f);
+            t16 |= ((((x16 & 0x07e0) * a8) >> 5) & 0x07e0) + (y16 & 0x07c0);
+
+            // rg2
+            t |= ((t16 & 0x00ff) << 24);
+
+            dest32[1] = t;
+
+            x = dest32[2];
+            y = src32[2];
+
+            // gb2
+            t = (t16 >> 8);
+        }
+    }
+    {
+        a8 = a & 0xff;
+
+        // g3,a3
+        t |= ((((x & 0x07e0ff00) * a8) >> 5) & 0x07e0ff00) + (y & 0x07c0f800);
+
+        // r3,b3
+        t |= ((((x & 0xf81f0000) >> 5) * a8) & 0xf81f0000) + (y & 0xf81f0000);
+
+        dest32[2] = t;
+    }
+}
+
+template <>
+inline void madd_4(qargb8555 *dest, const quint32 a, const qargb8555 *src)
+{
+    Q_ASSERT((quintptr(dest) & 0x3) == 0);
+    Q_ASSERT((quintptr(src) & 0x3) == 0);
+
+    const quint32 *src32 = reinterpret_cast<const quint32*>(src);
+    quint32 *dest32 =
reinterpret_cast(dest); + quint32 x, y, t; + quint8 a8; + + { + x = dest32[0]; + y = src32[0]; + + a8 = a >> 24; + + // a0,g0 + t = ((((x & 0x0003e0ff) * a8) >> 5) & 0x0003e0ff) + (y & 0x0003e0f8); + + // r0,b0 + t |= ((((x & 0x007c1f00) * a8) >> 5) & 0x007c1f00) + (y & 0x007c1f00); + + a8 = (a >> 16) & 0xff; + + // a1 + t |= ((((x & 0xff000000) >> 5) * a8) & 0xff000000) + (y & 0xf8000000); + + dest32[0] = t; + } + { + x = dest32[1]; + y = src32[1]; + + // r1,b1 + t = ((((x & 0x00007c1f) * a8) >> 5) & 0x00007c1f) + (y & 0x00007c1f); + + // g1 + t |= ((((x & 0x000003e0) * a8) >> 5) & 0x000003e0) + (y & 0x000003e0); + + a8 = (a >> 8) & 0xff; + + // a2 + t |= ((((x & 0x00ff0000) * a8) >> 5) & 0x00ff0000) + (y & 0x00f80000); + + { + // rgb2 + quint16 x16 = (x >> 24) | ((dest32[2] & 0x000000ff) << 8); + quint16 y16 = (y >> 24) | ((src32[2] & 0x000000ff) << 8); + quint16 t16; + + t16 = ((((x16 & 0x7c1f) * a8) >> 5) & 0x7c1f) + (y16 & 0x7c1f); + t16 |= ((((x16 & 0x03e0) * a8) >> 5) & 0x03e0) + (y16 & 0x03e0); + + // rg2 + t |= ((t16 & 0x00ff) << 24); + + dest32[1] = t; + + x = dest32[2]; + y = src32[2]; + + // gb2 + t = (t16 >> 8); + } + } + { + a8 = a & 0xff; + + // g3,a3 + t |= ((((x & 0x03e0ff00) * a8) >> 5) & 0x03e0ff00) + (y & 0x03e0f800); + + // r3,b3 + t |= ((((x & 0x7c1f0000) >> 5) * a8) & 0x7c1f0000)+ (y & 0x7c1f0000); + + dest32[2] = t; + } +} +#endif // Q_BYTE_ORDER + +template +inline quint16 alpha_2(const T *src) +{ + Q_ASSERT((quintptr(src) & 0x3) == 0); + + if (T::hasAlpha()) + return (src[0].alpha() << 8) | src[1].alpha(); + else + return 0xffff; +} + +template <> +inline quint16 alpha_2(const qargb4444 *src) +{ + const quint32 *src32 = reinterpret_cast(src); + const quint32 t = (*src32 & 0xf000f000) | + ((*src32 & 0xf000f000) >> 4); + return (t >> 24) | (t & 0xff00); +} + +template +inline quint32 alpha_4(const T *src) +{ + Q_ASSERT((quintptr(src) & 0x3) == 0); + + if (T::hasAlpha()) { + return (src[0].alpha() << 24) | (src[1].alpha() << 16) + | (src[2].alpha() << 8) | src[3].alpha(); + } else { + return 0xffffffff; + } +} + +template <> +inline quint32 alpha_4(const qargb8565 *src) +{ + const quint8 *src8 = reinterpret_cast(src); + return src8[0] << 24 | src8[3] << 16 | src8[6] << 8 | src8[9]; +} + +template <> +inline quint32 alpha_4(const qargb6666 *src) +{ + const quint8 *src8 = reinterpret_cast(src); + return ((src8[2] & 0xfc) | (src8[2] >> 6)) << 24 + | ((src8[5] & 0xfc) | (src8[5] >> 6)) << 16 + | ((src8[8] & 0xfc) | (src8[8] >> 6)) << 8 + | ((src8[11] & 0xfc) | (src8[11] >> 6)); +} + +template <> +inline quint32 alpha_4(const qargb8555 *src) +{ + Q_ASSERT((quintptr(src) & 0x3) == 0); + const quint8 *src8 = reinterpret_cast(src); + return src8[0] << 24 | src8[3] << 16 | src8[6] << 8 | src8[9]; +} + +template +inline quint16 eff_alpha_2(quint16 alpha, const T*) +{ + return (T::alpha((alpha >> 8) & 0xff) << 8) + | T::alpha(alpha & 0xff); +} + +template <> +inline quint16 eff_alpha_2(quint16 a, const qrgb565*) +{ + return ((((a & 0xff00) + 0x0100) >> 3) & 0xff00) + | ((((a & 0x00ff) + 0x0001) >> 3) & 0x00ff); +} + +template <> +inline quint16 eff_alpha_2(quint16 a, const qrgb444*) +{ + return (((a & 0x00ff) + 0x0001) >> 4) + | ((((a & 0xff00) + 0x0100) >> 4) & 0xff00); +} + +template <> +inline quint16 eff_alpha_2(quint16 a, const qargb4444*) +{ + return (((a & 0x00ff) + 0x0001) >> 4) + | ((((a & 0xff00) + 0x0100) >> 4) & 0xff00); +} + +template +inline quint16 eff_ialpha_2(quint16 alpha, const T*) +{ + return (T::ialpha((alpha >> 8) & 0xff) << 8) + | T::ialpha(alpha & 0xff); 
+} + +template <> +inline quint16 eff_ialpha_2(quint16 a, const qrgb565 *dummy) +{ + return 0x2020 - eff_alpha_2(a, dummy); +} + +template <> +inline quint16 eff_ialpha_2(quint16 a, const qargb4444 *dummy) +{ + return 0x1010 - eff_alpha_2(a, dummy); +} + +template <> +inline quint16 eff_ialpha_2(quint16 a, const qrgb444 *dummy) +{ + return 0x1010 - eff_alpha_2(a, dummy); +} + +template +inline quint32 eff_alpha_4(quint32 alpha, const T*) +{ + return (T::alpha(alpha >> 24) << 24) + | (T::alpha((alpha >> 16) & 0xff) << 16) + | (T::alpha((alpha >> 8) & 0xff) << 8) + | T::alpha(alpha & 0xff); +} + +template <> +inline quint32 eff_alpha_4(quint32 a, const qrgb888*) +{ + return a; +} + +template <> +inline quint32 eff_alpha_4(quint32 a, const qargb8565*) +{ + return ((((a & 0xff00ff00) + 0x01000100) >> 3) & 0xff00ff00) + | ((((a & 0x00ff00ff) + 0x00010001) >> 3) & 0x00ff00ff); +} + +template <> +inline quint32 eff_alpha_4(quint32 a, const qargb6666*) +{ + return ((((a & 0xff00ff00) >> 2) + 0x00400040) & 0xff00ff00) + | ((((a & 0x00ff00ff) + 0x00010001) >> 2) & 0x00ff00ff); +} + +template <> +inline quint32 eff_alpha_4(quint32 a, const qrgb666*) +{ + return ((((a & 0xff00ff00) >> 2) + 0x00400040) & 0xff00ff00) + | ((((a & 0x00ff00ff) + 0x00010001) >> 2) & 0x00ff00ff); +} + +template <> +inline quint32 eff_alpha_4(quint32 a, const qargb8555*) +{ + return ((((a & 0xff00ff00) + 0x01000100) >> 3) & 0xff00ff00) + | ((((a & 0x00ff00ff) + 0x00010001) >> 3) & 0x00ff00ff); +} + +template +inline quint32 eff_ialpha_4(quint32 alpha, const T*) +{ + return (T::ialpha(alpha >> 24) << 24) + | (T::ialpha((alpha >> 16) & 0xff) << 16) + | (T::ialpha((alpha >> 8) & 0xff) << 8) + | T::ialpha(alpha & 0xff); +} + +template <> +inline quint32 eff_ialpha_4(quint32 a, const qrgb888*) +{ + return ~a; +} + +template <> +inline quint32 eff_ialpha_4(quint32 a, const qargb8565 *dummy) +{ + return 0x20202020 - eff_alpha_4(a, dummy); +} + +template <> +inline quint32 eff_ialpha_4(quint32 a, const qargb6666 *dummy) +{ + return 0x40404040 - eff_alpha_4(a, dummy); +} + +template <> +inline quint32 eff_ialpha_4(quint32 a, const qrgb666 *dummy) +{ + return 0x40404040 - eff_alpha_4(a, dummy); +} + +template <> +inline quint32 eff_ialpha_4(quint32 a, const qargb8555 *dummy) +{ + return 0x20202020 - eff_alpha_4(a, dummy); +} + +template +inline void interpolate_pixel(DST &dest, quint8 a, const SRC &src, quint8 b) +{ + if (SRC::hasAlpha() && !DST::hasAlpha()) + interpolate_pixel(dest, a, DST(src), b); + else + dest = dest.byte_mul(a) + DST(src).byte_mul(b); +} + +template <> +inline void interpolate_pixel(qargb8565 &dest, quint8 a, + const qargb8565 &src, quint8 b) +{ + quint8 *d = reinterpret_cast(&dest); + const quint8 *s = reinterpret_cast(&src); + d[0] = (d[0] * a + s[0] * b) >> 5; + + const quint16 x = (d[2] << 8) | d[1]; + const quint16 y = (s[2] << 8) | s[1]; + quint16 t = (((x & 0x07e0) * a + (y & 0x07e0) * b) >> 5) & 0x07e0; + t |= (((x & 0xf81f) * a + (y & 0xf81f) * b) >> 5) & 0xf81f; + + d[1] = t & 0xff; + d[2] = t >> 8; +} + +template <> +inline void interpolate_pixel(qrgb565 &dest, quint8 a, + const qrgb565 &src, quint8 b) +{ + const quint16 x = dest.rawValue(); + const quint16 y = src.rawValue(); + quint16 t = (((x & 0x07e0) * a + (y & 0x07e0) * b) >> 5) & 0x07e0; + t |= (((x & 0xf81f) * a + (y & 0xf81f) * b) >> 5) & 0xf81f; + dest = t; +} + +template <> +inline void interpolate_pixel(qrgb555 &dest, quint8 a, + const qrgb555 &src, quint8 b) +{ + const quint16 x = dest.rawValue(); + const quint16 y = src.rawValue(); + 
quint16 t = (((x & 0x03e0) * a + (y & 0x03e0) * b) >> 5) & 0x03e0; + t |= ((((x & 0x7c1f) * a) + ((y & 0x7c1f) * b)) >> 5) & 0x7c1f; + dest = t; +} + +template <> +inline void interpolate_pixel(qrgb444 &dest, quint8 a, + const qrgb444 &src, quint8 b) +{ + const quint16 x = dest.rawValue(); + const quint16 y = src.rawValue(); + quint16 t = ((x & 0x00f0) * a + (y & 0x00f0) * b) & 0x0f00; + t |= ((x & 0x0f0f) * a + (y & 0x0f0f) * b) & 0xf0f0; + quint16 *d = reinterpret_cast(&dest); + *d = (t >> 4); +} + +template +inline void interpolate_pixel_2(DST *dest, const SRC *src, quint16 alpha) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint16 a = eff_alpha_2(alpha, dest); + const quint16 ia = eff_ialpha_2(alpha, dest); + + dest[0] = DST(src[0]).byte_mul(a >> 8) + dest[0].byte_mul(ia >> 8); + dest[1] = DST(src[1]).byte_mul(a & 0xff) + dest[1].byte_mul(ia & 0xff); +} + +template +inline void interpolate_pixel_2(DST *dest, quint8 a, + const SRC *src, quint8 b) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + Q_ASSERT(!SRC::hasAlpha()); + + dest[0] = dest[0].byte_mul(a) + DST(src[0]).byte_mul(b); + dest[1] = dest[1].byte_mul(a) + DST(src[1]).byte_mul(b); +} + +template <> +inline void interpolate_pixel_2(qrgb565 *dest, quint8 a, + const qrgb565 *src, quint8 b) +{ + quint32 *x = reinterpret_cast(dest); + const quint32 *y = reinterpret_cast(src); + quint32 t = (((*x & 0xf81f07e0) >> 5) * a + + ((*y & 0xf81f07e0) >> 5) * b) & 0xf81f07e0; + t |= (((*x & 0x07e0f81f) * a + + (*y & 0x07e0f81f) * b) >> 5) & 0x07e0f81f; + *x = t; +} + +template <> +inline void interpolate_pixel_2(qrgb555 *dest, quint8 a, + const qrgb555 *src, quint8 b) +{ + quint32 *x = reinterpret_cast(dest); + const quint32 *y = reinterpret_cast(src); + quint32 t = (((*x & 0x7c1f03e0) >> 5) * a + + ((*y & 0x7c1f03e0) >> 5) * b) & 0x7c1f03e0; + t |= (((*x & 0x03e07c1f) * a + + (*y & 0x03e07c1f) * b) >> 5) & 0x03e07c1f; + *x = t; +} + +template <> +inline void interpolate_pixel_2(qrgb444 *dest, quint8 a, + const qrgb444 *src, quint8 b) +{ + quint32 *x = reinterpret_cast(dest); + const quint32 *y = reinterpret_cast(src); + quint32 t = ((*x & 0x0f0f0f0f) * a + (*y & 0x0f0f0f0f) * b) & 0xf0f0f0f0; + t |= ((*x & 0x00f000f0) * a + (*y & 0x00f000f0) * b) & 0x0f000f00; + *x = t >> 4; +} + +template +inline void interpolate_pixel_4(DST *dest, const SRC *src, quint32 alpha) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = eff_alpha_4(alpha, dest); + const quint32 ia = eff_ialpha_4(alpha, dest); + dest[0] = DST(src[0]).byte_mul(a >> 24) + + dest[0].byte_mul(ia >> 24); + dest[1] = DST(src[1]).byte_mul((a >> 16) & 0xff) + + dest[1].byte_mul((ia >> 16) & 0xff); + dest[2] = DST(src[2]).byte_mul((a >> 8) & 0xff) + + dest[2].byte_mul((ia >> 8) & 0xff); + dest[3] = DST(src[3]).byte_mul(a & 0xff) + + dest[3].byte_mul(ia & 0xff); +} + +#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN +template <> +inline void interpolate_pixel_4(qargb8565 *dest, const qargb8565 *src, + quint32 alpha) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = eff_alpha_4(alpha, dest); + const quint32 ia = eff_ialpha_4(alpha, dest); + const quint32 *src32 = reinterpret_cast(src); + quint32 *dest32 = reinterpret_cast(dest); + + quint32 x, y, t; + quint8 a8, ia8; + { + x = src32[0]; + y = dest32[0]; + + a8 = a >> 24; + ia8 = ia >> 24; + + // a0,g0 + t = (((x & 0x0007e0ff) * a8 + (y & 0x0007e0ff) * ia8) >> 
5) + & 0x0007e0ff; + + // r0,b0 + t |= (((x & 0x00f81f00) * a8 + (y & 0x00f81f00) * ia8) >> 5) + & 0x00f81f00; + + a8 = (a >> 16) & 0xff; + ia8 = (ia >> 16) & 0xff; + + // a1 + t |= (((x & 0xff000000) >> 5) * a8 + ((y & 0xff000000) >> 5) * ia8) + & 0xff000000; + + dest32[0] = t; + } + { + x = src32[1]; + y = dest32[1]; + + // r1,b1 + t = (((x & 0x0000f81f) * a8 + (y & 0x0000f81f) * ia8) >> 5) + & 0x0000f81f; + + // g1 + t |= (((x & 0x000007e0) * a8 + (y & 0x000007e0) * ia8) >> 5) + & 0x000007e0; + + a8 = (a >> 8) & 0xff; + ia8 = (ia >> 8) & 0xff; + + // a2 + t |= (((x & 0x00ff0000) * a8 + (y & 0x00ff0000) * ia8) >> 5) + & 0x00ff0000; + + { + // rgb2 + quint16 x16 = (x >> 24) | ((src32[2] & 0x000000ff) << 8); + quint16 y16 = (y >> 24) | ((dest32[2] & 0x000000ff) << 8); + quint16 t16; + + t16 = (((x16 & 0xf81f) * a8 + (y16 & 0xf81f) * ia8) >> 5) & 0xf81f; + t16 |= (((x16 & 0x07e0) * a8 + (y16 & 0x07e0) * ia8) >> 5) & 0x07e0; + + // rg2 + t |= ((t16 & 0x00ff) << 24); + + dest32[1] = t; + + x = src32[2]; + y = dest32[2]; + + // gb2 + t = (t16 >> 8); + } + } + { + a8 = a & 0xff; + ia8 = ia & 0xff; + + // g3,a3 + t |= (((x & 0x07e0ff00) * a8 + (y & 0x07e0ff00) * ia8) >> 5) + & 0x07e0ff00; + + // r3,b3 + t |= (((x & 0xf81f0000) >> 5) * a8 + ((y & 0xf81f0000) >> 5) * ia8) + & 0xf81f0000; + + dest32[2] = t; + } +} + +template <> +inline void interpolate_pixel_4(qargb8555 *dest, const qargb8555 *src, + quint32 alpha) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + + const quint32 a = eff_alpha_4(alpha, dest); + const quint32 ia = eff_ialpha_4(alpha, dest); + const quint32 *src32 = reinterpret_cast(src); + quint32 *dest32 = reinterpret_cast(dest); + + quint32 x, y, t; + quint8 a8, ia8; + { + x = src32[0]; + y = dest32[0]; + + a8 = a >> 24; + ia8 = ia >> 24; + + // a0,g0 + t = (((x & 0x0003e0ff) * a8 + (y & 0x0003e0ff) * ia8) >> 5) + & 0x0003e0ff; + + // r0,b0 + t |= (((x & 0x007c1f00) * a8 + (y & 0x007c1f00) * ia8) >> 5) + & 0x007c1f00; + + a8 = (a >> 16) & 0xff; + ia8 = (ia >> 16) & 0xff; + + // a1 + t |= (((x & 0xff000000) >> 5) * a8 + ((y & 0xff000000) >> 5) * ia8) + & 0xff000000; + + dest32[0] = t; + } + { + x = src32[1]; + y = dest32[1]; + + // r1,b1 + t = (((x & 0x00007c1f) * a8 + (y & 0x00007c1f) * ia8) >> 5) + & 0x00007c1f; + + // g1 + t |= (((x & 0x000003e0) * a8 + (y & 0x000003e0) * ia8) >> 5) + & 0x000003e0; + + a8 = (a >> 8) & 0xff; + ia8 = (ia >> 8) & 0xff; + + // a2 + t |= (((x & 0x00ff0000) * a8 + (y & 0x00ff0000) * ia8) >> 5) + & 0x00ff0000; + + { + // rgb2 + quint16 x16 = (x >> 24) | ((src32[2] & 0x000000ff) << 8); + quint16 y16 = (y >> 24) | ((dest32[2] & 0x000000ff) << 8); + quint16 t16; + + t16 = (((x16 & 0x7c1f) * a8 + (y16 & 0x7c1f) * ia8) >> 5) & 0x7c1f; + t16 |= (((x16 & 0x03e0) * a8 + (y16 & 0x03e0) * ia8) >> 5) & 0x03e0; + + // rg2 + t |= ((t16 & 0x00ff) << 24); + + dest32[1] = t; + + x = src32[2]; + y = dest32[2]; + + // gb2 + t = (t16 >> 8); + } + } + { + a8 = a & 0xff; + ia8 = ia & 0xff; + + // g3,a3 + t |= (((x & 0x03e0ff00) * a8 + (y & 0x03e0ff00) * ia8) >> 5) + & 0x03e0ff00; + + // r3,b3 + t |= (((x & 0x7c1f0000) >> 5) * a8 + ((y & 0x7c1f0000) >> 5) * ia8) + & 0x7c1f0000; + + dest32[2] = t; + } +} +#endif // Q_BYTE_ORDER + +template <> +inline void interpolate_pixel_4(qrgb888 *dest, const qrgb888 *src, + quint32 alpha) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = eff_alpha_4(alpha, dest); + const quint32 ia = eff_ialpha_4(alpha, dest); + const quint32 *src32 = 
reinterpret_cast(src); + quint32 *dest32 = reinterpret_cast(dest); + + { + quint32 x = src32[0]; + quint32 y = dest32[0]; + + quint32 t; + t = ((x >> 8) & 0xff00ff) * (a >> 24) + + ((y >> 8) & 0xff00ff) * (ia >> 24); + t = (t + ((t >> 8) & 0xff00ff) + 0x800080); + t &= 0xff00ff00; + + x = (x & 0xff0000) * (a >> 24) + + (x & 0x0000ff) * ((a >> 16) & 0xff) + + (y & 0xff0000) * (ia >> 24) + + (y & 0x0000ff) * ((ia >> 16) & 0xff); + x = (x + ((x >> 8) & 0xff00ff) + 0x800080) >> 8; + x &= 0x00ff00ff; + + dest32[0] = x | t; + } + { + quint32 x = src32[1]; + quint32 y = dest32[1]; + + quint32 t; + t = ((x >> 8) & 0xff0000) * ((a >> 16) & 0xff) + + ((x >> 8) & 0x0000ff) * ((a >> 8) & 0xff) + + ((y >> 8) & 0xff0000) * ((ia >> 16) & 0xff) + + ((y >> 8) & 0x0000ff) * ((ia >> 8) & 0xff); + t = (t + ((t >> 8) & 0xff00ff) + 0x800080); + t &= 0xff00ff00; + + x = (x & 0xff0000) * ((a >> 16) & 0xff) + + (x & 0x0000ff) * ((a >> 8) & 0xff) + + (y & 0xff0000) * ((ia >> 16) & 0xff) + + (y & 0x0000ff) * ((ia >> 8) & 0xff); + x = (x + ((x >> 8) & 0xff00ff) + 0x800080) >> 8; + x &= 0x00ff00ff; + + dest32[1] = x | t; + } + { + quint32 x = src32[2]; + quint32 y = dest32[2]; + + quint32 t; + t = ((x >> 8) & 0xff0000) * ((a >> 8) & 0xff) + + ((x >> 8) & 0x0000ff) * (a & 0xff) + + ((y >> 8) & 0xff0000) * ((ia >> 8) & 0xff) + + ((y >> 8) & 0x0000ff) * (ia & 0xff); + t = (t + ((t >> 8) & 0xff00ff) + 0x800080); + t &= 0xff00ff00; + + x = (x & 0xff00ff) * (a & 0xff) + + (y & 0xff00ff) * (ia & 0xff); + x = (x + ((x >> 8) & 0xff00ff) + 0x800080) >> 8; + x &= 0x00ff00ff; + + dest32[2] = x | t; + } +} + +template +inline void interpolate_pixel_4(DST *dest, quint8 a, + const SRC *src, quint8 b) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + dest[0] = dest[0].byte_mul(a) + DST(src[0]).byte_mul(b); + dest[1] = dest[1].byte_mul(a) + DST(src[1]).byte_mul(b); + dest[2] = dest[2].byte_mul(a) + DST(src[2]).byte_mul(b); + dest[3] = dest[3].byte_mul(a) + DST(src[3]).byte_mul(b); +} + +template +inline void blend_sourceOver_4(DST *dest, const SRC *src) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = alpha_4(src); + if (a == 0xffffffff) { + qt_memconvert(dest, src, 4); + } else if (a > 0) { + quint32 buf[3]; // array of quint32 to get correct alignment + qt_memconvert((DST*)buf, src, 4); + madd_4(dest, eff_ialpha_4(a, dest), (DST*)buf); + } +} + +template <> +inline void blend_sourceOver_4(qargb8565 *dest, const qargb8565 *src) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = alpha_4(src); + if (a == 0xffffffff) { + qt_memconvert(dest, src, 4); + } else if (a > 0) { + madd_4(dest, eff_ialpha_4(a, dest), src); + } +} + +template <> +inline void blend_sourceOver_4(qargb8555 *dest, const qargb8555 *src) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = alpha_4(src); + if (a == 0xffffffff) { + qt_memconvert(dest, src, 4); + } else if (a > 0) { + madd_4(dest, eff_ialpha_4(a, dest), src); + } +} + +template <> +inline void blend_sourceOver_4(qargb6666 *dest, const qargb6666 *src) +{ + Q_ASSERT((quintptr(dest) & 0x3) == 0); + Q_ASSERT((quintptr(src) & 0x3) == 0); + + const quint32 a = alpha_4(src); + if (a == 0xffffffff) { + qt_memconvert(dest, src, 4); + } else if (a > 0) { + madd_4(dest, eff_ialpha_4(a, dest), src); + } +} + /* Destination fetch. 
This is simple as we don't have to do bounds checks or transformations @@ -2649,8 +3506,6 @@ static inline Operator getOperator(const QSpanData *data, const QSpan *spans, in return op; } - - // -------------------- blend methods --------------------- static void blend_color_generic(int count, const QSpan *spans, void *userData) { @@ -2832,859 +3687,6 @@ static void blend_untransformed_argb(int count, const QSpan *spans, void *userDa } } -template -inline void madd_2(DST *dest, const quint16 alpha, const SRC *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - dest[0] = dest[0].byte_mul(alpha >> 8) + DST(src[0]); - dest[1] = dest[1].byte_mul(alpha & 0xff) + DST(src[1]); -} - -template -inline void madd_4(DST *dest, const quint32 alpha, const SRC *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - dest[0] = dest[0].byte_mul(alpha >> 24) + DST(src[0]); - dest[1] = dest[1].byte_mul((alpha >> 16) & 0xff) + DST(src[1]); - dest[2] = dest[2].byte_mul((alpha >> 8) & 0xff) + DST(src[2]); - dest[3] = dest[3].byte_mul(alpha & 0xff) + DST(src[3]); -} - -#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN -template <> -inline void madd_4(qargb8565 *dest, const quint32 a, const qargb8565 *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 *src32 = reinterpret_cast(src); - quint32 *dest32 = reinterpret_cast(dest); - quint32 x, y, t; - quint8 a8; - - { - x = dest32[0]; - y = src32[0]; - - a8 = a >> 24; - - // a0,g0 - t = ((((x & 0x0007e0ff) * a8) >> 5) & 0x0007e0ff) + (y & 0x0007c0f8); - - // r0,b0 - t |= ((((x & 0x00f81f00) * a8) >> 5) & 0x00f81f00) + (y & 0x00f81f00); - - a8 = (a >> 16) & 0xff; - - // a1 - t |= ((((x & 0xff000000) >> 5) * a8) & 0xff000000) + (y & 0xf8000000); - - dest32[0] = t; - } - { - x = dest32[1]; - y = src32[1]; - - // r1,b1 - t = ((((x & 0x0000f81f) * a8) >> 5) & 0x0000f81f) + (y & 0x0000f81f); - - // g1 - t |= ((((x & 0x000007e0) * a8) >> 5) & 0x000007e0) + (y & 0x000007c0); - - a8 = (a >> 8) & 0xff; - - // a2 - t |= ((((x & 0x00ff0000) * a8) >> 5) & 0x00ff0000) + (y & 0x00f80000); - - { - // rgb2 - quint16 x16 = (x >> 24) | ((dest32[2] & 0x000000ff) << 8); - quint16 y16 = (y >> 24) | ((src32[2] & 0x000000ff) << 8); - quint16 t16; - - t16 = ((((x16 & 0xf81f) * a8) >> 5) & 0xf81f) + (y16 & 0xf81f); - t16 |= ((((x16 & 0x07e0) * a8) >> 5) & 0x07e0) + (y16 & 0x07c0); - - // rg2 - t |= ((t16 & 0x00ff) << 24); - - dest32[1] = t; - - x = dest32[2]; - y = src32[2]; - - // gb2 - t = (t16 >> 8); - } - } - { - a8 = a & 0xff; - - // g3,a3 - t |= ((((x & 0x07e0ff00) * a8) >> 5) & 0x07e0ff00) + (y & 0x07c0f800); - - // r3,b3 - t |= ((((x & 0xf81f0000) >> 5) * a8) & 0xf81f0000)+ (y & 0xf81f0000); - - dest32[2] = t; - } -} -#endif - -#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN -template <> -inline void madd_4(qargb8555 *dest, const quint32 a, const qargb8555 *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 *src32 = reinterpret_cast(src); - quint32 *dest32 = reinterpret_cast(dest); - quint32 x, y, t; - quint8 a8; - - { - x = dest32[0]; - y = src32[0]; - - a8 = a >> 24; - - // a0,g0 - t = ((((x & 0x0003e0ff) * a8) >> 5) & 0x0003e0ff) + (y & 0x0003e0f8); - - // r0,b0 - t |= ((((x & 0x007c1f00) * a8) >> 5) & 0x007c1f00) + (y & 0x007c1f00); - - a8 = (a >> 16) & 0xff; - - // a1 - t |= ((((x & 0xff000000) >> 5) * a8) & 0xff000000) + (y & 0xf8000000); - - dest32[0] = t; - } - { - x = dest32[1]; - y = src32[1]; - - // r1,b1 - t = ((((x & 
0x00007c1f) * a8) >> 5) & 0x00007c1f) + (y & 0x00007c1f); - - // g1 - t |= ((((x & 0x000003e0) * a8) >> 5) & 0x000003e0) + (y & 0x000003e0); - - a8 = (a >> 8) & 0xff; - - // a2 - t |= ((((x & 0x00ff0000) * a8) >> 5) & 0x00ff0000) + (y & 0x00f80000); - - { - // rgb2 - quint16 x16 = (x >> 24) | ((dest32[2] & 0x000000ff) << 8); - quint16 y16 = (y >> 24) | ((src32[2] & 0x000000ff) << 8); - quint16 t16; - - t16 = ((((x16 & 0x7c1f) * a8) >> 5) & 0x7c1f) + (y16 & 0x7c1f); - t16 |= ((((x16 & 0x03e0) * a8) >> 5) & 0x03e0) + (y16 & 0x03e0); - - // rg2 - t |= ((t16 & 0x00ff) << 24); - - dest32[1] = t; - - x = dest32[2]; - y = src32[2]; - - // gb2 - t = (t16 >> 8); - } - } - { - a8 = a & 0xff; - - // g3,a3 - t |= ((((x & 0x03e0ff00) * a8) >> 5) & 0x03e0ff00) + (y & 0x03e0f800); - - // r3,b3 - t |= ((((x & 0x7c1f0000) >> 5) * a8) & 0x7c1f0000)+ (y & 0x7c1f0000); - - dest32[2] = t; - } -} -#endif - -template -inline quint16 alpha_2(const T *src) -{ - Q_ASSERT((quintptr(src) & 0x3) == 0); - - if (T::hasAlpha()) - return (src[0].alpha() << 8) | src[1].alpha(); - else - return 0xffff; -} - -template -inline quint32 alpha_4(const T *src) -{ - Q_ASSERT((quintptr(src) & 0x3) == 0); - - if (T::hasAlpha()) { - return (src[0].alpha() << 24) | (src[1].alpha() << 16) - | (src[2].alpha() << 8) | src[3].alpha(); - } else { - return 0xffffffff; - } -} - -template <> -inline quint32 alpha_4(const qargb8565 *src) -{ - const quint8 *src8 = reinterpret_cast(src); - return src8[0] << 24 | src8[3] << 16 | src8[6] << 8 | src8[9]; -} - -template <> -inline quint32 alpha_4(const qargb6666 *src) -{ - const quint8 *src8 = reinterpret_cast(src); - return ((src8[2] & 0xfc) | (src8[2] >> 6)) << 24 - | ((src8[5] & 0xfc) | (src8[5] >> 6)) << 16 - | ((src8[8] & 0xfc) | (src8[8] >> 6)) << 8 - | ((src8[11] & 0xfc) | (src8[11] >> 6)); -} - -template <> -inline quint32 alpha_4(const qargb8555 *src) -{ - Q_ASSERT((quintptr(src) & 0x3) == 0); - const quint8 *src8 = reinterpret_cast(src); - return src8[0] << 24 | src8[3] << 16 | src8[6] << 8 | src8[9]; -} - -template <> -inline quint16 alpha_2(const qargb4444 *src) -{ - const quint32 *src32 = reinterpret_cast(src); - const quint32 t = (*src32 & 0xf000f000) | - ((*src32 & 0xf000f000) >> 4); - return (t >> 24) | (t & 0xff00); -} - -template -inline quint16 eff_alpha_2(quint16 alpha, const T*) -{ - return (T::alpha((alpha >> 8) & 0xff) << 8) - | T::alpha(alpha & 0xff); -} - -template <> -inline quint16 eff_alpha_2(quint16 a, const qrgb565*) -{ - return ((((a & 0xff00) + 0x0100) >> 3) & 0xff00) - | ((((a & 0x00ff) + 0x0001) >> 3) & 0x00ff); -} - -template <> -inline quint16 eff_alpha_2(quint16 a, const qrgb444*) -{ - return (((a & 0x00ff) + 0x0001) >> 4) - | ((((a & 0xff00) + 0x0100) >> 4) & 0xff00); -} - -template <> -inline quint16 eff_alpha_2(quint16 a, const qargb4444*) -{ - return (((a & 0x00ff) + 0x0001) >> 4) - | ((((a & 0xff00) + 0x0100) >> 4) & 0xff00); -} - -template -inline quint16 eff_ialpha_2(quint16 alpha, const T*) -{ - return (T::ialpha((alpha >> 8) & 0xff) << 8) - | T::ialpha(alpha & 0xff); -} - -template <> -inline quint16 eff_ialpha_2(quint16 a, const qrgb565 *dummy) -{ - return 0x2020 - eff_alpha_2(a, dummy); -} - -template <> -inline quint16 eff_ialpha_2(quint16 a, const qargb4444 *dummy) -{ - return 0x1010 - eff_alpha_2(a, dummy); -} - -template <> -inline quint16 eff_ialpha_2(quint16 a, const qrgb444 *dummy) -{ - return 0x1010 - eff_alpha_2(a, dummy); -} - -template -inline quint32 eff_alpha_4(quint32 alpha, const T*) -{ - return (T::alpha(alpha >> 24) << 24) - | 
(T::alpha((alpha >> 16) & 0xff) << 16) - | (T::alpha((alpha >> 8) & 0xff) << 8) - | T::alpha(alpha & 0xff); -} - -template <> -inline quint32 eff_alpha_4(quint32 a, const qrgb888*) -{ - return a; -} - -template <> -inline quint32 eff_alpha_4(quint32 a, const qargb8565*) -{ - return ((((a & 0xff00ff00) + 0x01000100) >> 3) & 0xff00ff00) - | ((((a & 0x00ff00ff) + 0x00010001) >> 3) & 0x00ff00ff); -} - -template <> -inline quint32 eff_alpha_4(quint32 a, const qargb6666*) -{ - return ((((a & 0xff00ff00) >> 2) + 0x00400040) & 0xff00ff00) - | ((((a & 0x00ff00ff) + 0x00010001) >> 2) & 0x00ff00ff); -} - -template <> -inline quint32 eff_alpha_4(quint32 a, const qrgb666*) -{ - return ((((a & 0xff00ff00) >> 2) + 0x00400040) & 0xff00ff00) - | ((((a & 0x00ff00ff) + 0x00010001) >> 2) & 0x00ff00ff); -} - -template <> -inline quint32 eff_alpha_4(quint32 a, const qargb8555*) -{ - return ((((a & 0xff00ff00) + 0x01000100) >> 3) & 0xff00ff00) - | ((((a & 0x00ff00ff) + 0x00010001) >> 3) & 0x00ff00ff); -} - -template -inline quint32 eff_ialpha_4(quint32 alpha, const T*) -{ - return (T::ialpha(alpha >> 24) << 24) - | (T::ialpha((alpha >> 16) & 0xff) << 16) - | (T::ialpha((alpha >> 8) & 0xff) << 8) - | T::ialpha(alpha & 0xff); -} - -template <> -inline quint32 eff_ialpha_4(quint32 a, const qrgb888*) -{ - return ~a; -} - -template <> -inline quint32 eff_ialpha_4(quint32 a, const qargb8565 *dummy) -{ - return 0x20202020 - eff_alpha_4(a, dummy); -} - -template <> -inline quint32 eff_ialpha_4(quint32 a, const qargb6666 *dummy) -{ - return 0x40404040 - eff_alpha_4(a, dummy); -} - -template <> -inline quint32 eff_ialpha_4(quint32 a, const qrgb666 *dummy) -{ - return 0x40404040 - eff_alpha_4(a, dummy); -} - -template <> -inline quint32 eff_ialpha_4(quint32 a, const qargb8555 *dummy) -{ - return 0x20202020 - eff_alpha_4(a, dummy); -} - -template -inline void interpolate_pixel_2(DST *dest, const SRC *src, quint16 alpha) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint16 a = eff_alpha_2(alpha, dest); - const quint16 ia = eff_ialpha_2(alpha, dest); - - dest[0] = DST(src[0]).byte_mul(a >> 8) + dest[0].byte_mul(ia >> 8); - dest[1] = DST(src[1]).byte_mul(a & 0xff) + dest[1].byte_mul(ia & 0xff); -} - -template -inline void interpolate_pixel(DST &dest, quint8 a, const SRC &src, quint8 b) -{ - if (SRC::hasAlpha() && !DST::hasAlpha()) - interpolate_pixel(dest, a, DST(src), b); - else - dest = dest.byte_mul(a) + DST(src).byte_mul(b); -} - -template <> -inline void interpolate_pixel(qargb8565 &dest, quint8 a, - const qargb8565 &src, quint8 b) -{ - quint8 *d = reinterpret_cast(&dest); - const quint8 *s = reinterpret_cast(&src); - d[0] = (d[0] * a + s[0] * b) >> 5; - - const quint16 x = (d[2] << 8) | d[1]; - const quint16 y = (s[2] << 8) | s[1]; - quint16 t = (((x & 0x07e0) * a + (y & 0x07e0) * b) >> 5) & 0x07e0; - t |= (((x & 0xf81f) * a + (y & 0xf81f) * b) >> 5) & 0xf81f; - - d[1] = t & 0xff; - d[2] = t >> 8; -} - -template <> -inline void interpolate_pixel(qrgb565 &dest, quint8 a, - const qrgb565 &src, quint8 b) -{ - const quint16 x = dest.rawValue(); - const quint16 y = src.rawValue(); - quint16 t = (((x & 0x07e0) * a + (y & 0x07e0) * b) >> 5) & 0x07e0; - t |= (((x & 0xf81f) * a + (y & 0xf81f) * b) >> 5) & 0xf81f; - dest = t; -} - -template <> -inline void interpolate_pixel(qrgb555 &dest, quint8 a, - const qrgb555 &src, quint8 b) -{ - const quint16 x = dest.rawValue(); - const quint16 y = src.rawValue(); - quint16 t = (((x & 0x03e0) * a + (y & 0x03e0) * b) >> 5) & 0x03e0; - t |= 
((((x & 0x7c1f) * a) + ((y & 0x7c1f) * b)) >> 5) & 0x7c1f; - dest = t; -} - -template <> -inline void interpolate_pixel(qrgb444 &dest, quint8 a, - const qrgb444 &src, quint8 b) -{ - const quint16 x = dest.rawValue(); - const quint16 y = src.rawValue(); - quint16 t = ((x & 0x00f0) * a + (y & 0x00f0) * b) & 0x0f00; - t |= ((x & 0x0f0f) * a + (y & 0x0f0f) * b) & 0xf0f0; - quint16 *d = reinterpret_cast(&dest); - *d = (t >> 4); -} - -template -inline void interpolate_pixel_2(DST *dest, quint8 a, - const SRC *src, quint8 b) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - Q_ASSERT(!SRC::hasAlpha()); - - dest[0] = dest[0].byte_mul(a) + DST(src[0]).byte_mul(b); - dest[1] = dest[1].byte_mul(a) + DST(src[1]).byte_mul(b); -} - -template <> -inline void interpolate_pixel_2(qrgb565 *dest, quint8 a, - const qrgb565 *src, quint8 b) -{ - quint32 *x = reinterpret_cast(dest); - const quint32 *y = reinterpret_cast(src); - quint32 t = (((*x & 0xf81f07e0) >> 5) * a + - ((*y & 0xf81f07e0) >> 5) * b) & 0xf81f07e0; - t |= (((*x & 0x07e0f81f) * a - + (*y & 0x07e0f81f) * b) >> 5) & 0x07e0f81f; - *x = t; -} - -template <> -inline void interpolate_pixel_2(qrgb555 *dest, quint8 a, - const qrgb555 *src, quint8 b) -{ - quint32 *x = reinterpret_cast(dest); - const quint32 *y = reinterpret_cast(src); - quint32 t = (((*x & 0x7c1f03e0) >> 5) * a + - ((*y & 0x7c1f03e0) >> 5) * b) & 0x7c1f03e0; - t |= (((*x & 0x03e07c1f) * a - + (*y & 0x03e07c1f) * b) >> 5) & 0x03e07c1f; - *x = t; -} - -template <> -inline void interpolate_pixel_2(qrgb444 *dest, quint8 a, - const qrgb444 *src, quint8 b) -{ - quint32 *x = reinterpret_cast(dest); - const quint32 *y = reinterpret_cast(src); - quint32 t = ((*x & 0x0f0f0f0f) * a + (*y & 0x0f0f0f0f) * b) & 0xf0f0f0f0; - t |= ((*x & 0x00f000f0) * a + (*y & 0x00f000f0) * b) & 0x0f000f00; - *x = t >> 4; -} - -template -inline void interpolate_pixel_4(DST *dest, const SRC *src, quint32 alpha) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = eff_alpha_4(alpha, dest); - const quint32 ia = eff_ialpha_4(alpha, dest); - dest[0] = DST(src[0]).byte_mul(a >> 24) - + dest[0].byte_mul(ia >> 24); - dest[1] = DST(src[1]).byte_mul((a >> 16) & 0xff) - + dest[1].byte_mul((ia >> 16) & 0xff); - dest[2] = DST(src[2]).byte_mul((a >> 8) & 0xff) - + dest[2].byte_mul((ia >> 8) & 0xff); - dest[3] = DST(src[3]).byte_mul(a & 0xff) - + dest[3].byte_mul(ia & 0xff); -} - -#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN -template <> -inline void interpolate_pixel_4(qargb8565 *dest, const qargb8565 *src, - quint32 alpha) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = eff_alpha_4(alpha, dest); - const quint32 ia = eff_ialpha_4(alpha, dest); - const quint32 *src32 = reinterpret_cast(src); - quint32 *dest32 = reinterpret_cast(dest); - - quint32 x, y, t; - quint8 a8, ia8; - { - x = src32[0]; - y = dest32[0]; - - a8 = a >> 24; - ia8 = ia >> 24; - - // a0,g0 - t = (((x & 0x0007e0ff) * a8 + (y & 0x0007e0ff) * ia8) >> 5) - & 0x0007e0ff; - - // r0,b0 - t |= (((x & 0x00f81f00) * a8 + (y & 0x00f81f00) * ia8) >> 5) - & 0x00f81f00; - - a8 = (a >> 16) & 0xff; - ia8 = (ia >> 16) & 0xff; - - // a1 - t |= (((x & 0xff000000) >> 5) * a8 + ((y & 0xff000000) >> 5) * ia8) - & 0xff000000; - - dest32[0] = t; - } - { - x = src32[1]; - y = dest32[1]; - - // r1,b1 - t = (((x & 0x0000f81f) * a8 + (y & 0x0000f81f) * ia8) >> 5) - & 0x0000f81f; - - // g1 - t |= (((x & 0x000007e0) * a8 + (y & 0x000007e0) * ia8) >> 5) - & 
0x000007e0; - - a8 = (a >> 8) & 0xff; - ia8 = (ia >> 8) & 0xff; - - // a2 - t |= (((x & 0x00ff0000) * a8 + (y & 0x00ff0000) * ia8) >> 5) - & 0x00ff0000; - - { - // rgb2 - quint16 x16 = (x >> 24) | ((src32[2] & 0x000000ff) << 8); - quint16 y16 = (y >> 24) | ((dest32[2] & 0x000000ff) << 8); - quint16 t16; - - t16 = (((x16 & 0xf81f) * a8 + (y16 & 0xf81f) * ia8) >> 5) & 0xf81f; - t16 |= (((x16 & 0x07e0) * a8 + (y16 & 0x07e0) * ia8) >> 5) & 0x07e0; - - // rg2 - t |= ((t16 & 0x00ff) << 24); - - dest32[1] = t; - - x = src32[2]; - y = dest32[2]; - - // gb2 - t = (t16 >> 8); - } - } - { - a8 = a & 0xff; - ia8 = ia & 0xff; - - // g3,a3 - t |= (((x & 0x07e0ff00) * a8 + (y & 0x07e0ff00) * ia8) >> 5) - & 0x07e0ff00; - - // r3,b3 - t |= (((x & 0xf81f0000) >> 5) * a8 + ((y & 0xf81f0000) >> 5) * ia8) - & 0xf81f0000; - - dest32[2] = t; - } -} -#endif - -#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN -template <> -inline void interpolate_pixel_4(qargb8555 *dest, const qargb8555 *src, - quint32 alpha) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - - const quint32 a = eff_alpha_4(alpha, dest); - const quint32 ia = eff_ialpha_4(alpha, dest); - const quint32 *src32 = reinterpret_cast(src); - quint32 *dest32 = reinterpret_cast(dest); - - quint32 x, y, t; - quint8 a8, ia8; - { - x = src32[0]; - y = dest32[0]; - - a8 = a >> 24; - ia8 = ia >> 24; - - // a0,g0 - t = (((x & 0x0003e0ff) * a8 + (y & 0x0003e0ff) * ia8) >> 5) - & 0x0003e0ff; - - // r0,b0 - t |= (((x & 0x007c1f00) * a8 + (y & 0x007c1f00) * ia8) >> 5) - & 0x007c1f00; - - a8 = (a >> 16) & 0xff; - ia8 = (ia >> 16) & 0xff; - - // a1 - t |= (((x & 0xff000000) >> 5) * a8 + ((y & 0xff000000) >> 5) * ia8) - & 0xff000000; - - dest32[0] = t; - } - { - x = src32[1]; - y = dest32[1]; - - // r1,b1 - t = (((x & 0x00007c1f) * a8 + (y & 0x00007c1f) * ia8) >> 5) - & 0x00007c1f; - - // g1 - t |= (((x & 0x000003e0) * a8 + (y & 0x000003e0) * ia8) >> 5) - & 0x000003e0; - - a8 = (a >> 8) & 0xff; - ia8 = (ia >> 8) & 0xff; - - // a2 - t |= (((x & 0x00ff0000) * a8 + (y & 0x00ff0000) * ia8) >> 5) - & 0x00ff0000; - - { - // rgb2 - quint16 x16 = (x >> 24) | ((src32[2] & 0x000000ff) << 8); - quint16 y16 = (y >> 24) | ((dest32[2] & 0x000000ff) << 8); - quint16 t16; - - t16 = (((x16 & 0x7c1f) * a8 + (y16 & 0x7c1f) * ia8) >> 5) & 0x7c1f; - t16 |= (((x16 & 0x03e0) * a8 + (y16 & 0x03e0) * ia8) >> 5) & 0x03e0; - - // rg2 - t |= ((t16 & 0x00ff) << 24); - - dest32[1] = t; - - x = src32[2]; - y = dest32[2]; - - // gb2 - t = (t16 >> 8); - } - } - { - a8 = a & 0xff; - ia8 = ia & 0xff; - - // g3,a3 - t |= (((x & 0x03e0ff00) * a8 + (y & 0x03e0ff00) * ia8) >> 5) - & 0x03e0ff00; - - // r3,b3 - t |= (((x & 0x7c1f0000) >> 5) * a8 + ((y & 0x7c1f0000) >> 5) * ia8) - & 0x7c1f0000; - - dest32[2] = t; - } -} -#endif - -template <> -inline void interpolate_pixel_4(qrgb888 *dest, const qrgb888 *src, - quint32 alpha) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = eff_alpha_4(alpha, dest); - const quint32 ia = eff_ialpha_4(alpha, dest); - const quint32 *src32 = reinterpret_cast(src); - quint32 *dest32 = reinterpret_cast(dest); - - { - quint32 x = src32[0]; - quint32 y = dest32[0]; - - quint32 t; - t = ((x >> 8) & 0xff00ff) * (a >> 24) - + ((y >> 8) & 0xff00ff) * (ia >> 24); - t = (t + ((t >> 8) & 0xff00ff) + 0x800080); - t &= 0xff00ff00; - - x = (x & 0xff0000) * (a >> 24) - + (x & 0x0000ff) * ((a >> 16) & 0xff) - + (y & 0xff0000) * (ia >> 24) - + (y & 0x0000ff) * ((ia >> 16) & 0xff); - x = (x + ((x >> 8) & 0xff00ff) + 
0x800080) >> 8; - x &= 0x00ff00ff; - - dest32[0] = x | t; - } - { - quint32 x = src32[1]; - quint32 y = dest32[1]; - - quint32 t; - t = ((x >> 8) & 0xff0000) * ((a >> 16) & 0xff) - + ((x >> 8) & 0x0000ff) * ((a >> 8) & 0xff) - + ((y >> 8) & 0xff0000) * ((ia >> 16) & 0xff) - + ((y >> 8) & 0x0000ff) * ((ia >> 8) & 0xff); - t = (t + ((t >> 8) & 0xff00ff) + 0x800080); - t &= 0xff00ff00; - - x = (x & 0xff0000) * ((a >> 16) & 0xff) - + (x & 0x0000ff) * ((a >> 8) & 0xff) - + (y & 0xff0000) * ((ia >> 16) & 0xff) - + (y & 0x0000ff) * ((ia >> 8) & 0xff); - x = (x + ((x >> 8) & 0xff00ff) + 0x800080) >> 8; - x &= 0x00ff00ff; - - dest32[1] = x | t; - } - { - quint32 x = src32[2]; - quint32 y = dest32[2]; - - quint32 t; - t = ((x >> 8) & 0xff0000) * ((a >> 8) & 0xff) - + ((x >> 8) & 0x0000ff) * (a & 0xff) - + ((y >> 8) & 0xff0000) * ((ia >> 8) & 0xff) - + ((y >> 8) & 0x0000ff) * (ia & 0xff); - t = (t + ((t >> 8) & 0xff00ff) + 0x800080); - t &= 0xff00ff00; - - x = (x & 0xff00ff) * (a & 0xff) - + (y & 0xff00ff) * (ia & 0xff); - x = (x + ((x >> 8) & 0xff00ff) + 0x800080) >> 8; - x &= 0x00ff00ff; - - dest32[2] = x | t; - } -} - -template -inline void interpolate_pixel_4(DST *dest, quint8 a, - const SRC *src, quint8 b) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - dest[0] = dest[0].byte_mul(a) + DST(src[0]).byte_mul(b); - dest[1] = dest[1].byte_mul(a) + DST(src[1]).byte_mul(b); - dest[2] = dest[2].byte_mul(a) + DST(src[2]).byte_mul(b); - dest[3] = dest[3].byte_mul(a) + DST(src[3]).byte_mul(b); -} - -template -inline void blend_sourceOver_4(DST *dest, const SRC *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = alpha_4(src); - if (a == 0xffffffff) { - qt_memconvert(dest, src, 4); - } else if (a > 0) { - quint32 buf[3]; // array of quint32 to get correct alignment - qt_memconvert((DST*)(void*)buf, src, 4); - madd_4(dest, eff_ialpha_4(a, dest), (DST*)(void*)buf); - } -} - -template <> -inline void blend_sourceOver_4(qargb8565 *dest, const qargb8565 *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = alpha_4(src); - if (a == 0xffffffff) { - qt_memconvert(dest, src, 4); - } else if (a > 0) { - madd_4(dest, eff_ialpha_4(a, dest), src); - } -} - -template <> -inline void blend_sourceOver_4(qargb8555 *dest, const qargb8555 *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = alpha_4(src); - if (a == 0xffffffff) { - qt_memconvert(dest, src, 4); - } else if (a > 0) { - madd_4(dest, eff_ialpha_4(a, dest), src); - } -} - -template <> -inline void blend_sourceOver_4(qargb6666 *dest, const qargb6666 *src) -{ - Q_ASSERT((quintptr(dest) & 0x3) == 0); - Q_ASSERT((quintptr(src) & 0x3) == 0); - - const quint32 a = alpha_4(src); - if (a == 0xffffffff) { - qt_memconvert(dest, src, 4); - } else if (a > 0) { - madd_4(dest, eff_ialpha_4(a, dest), src); - } -} - template static void QT_FASTCALL blendUntransformed_unaligned(DST *dest, const SRC *src, quint8 coverage, int length) @@ -3862,7 +3864,7 @@ static void QT_FASTCALL blendUntransformed_dest24(DST *dest, const SRC *src, if (SRC::hasAlpha()) { while (length >= 4) { - const quint32 alpha = QT_PREPEND_NAMESPACE(BYTE_MUL)(uint(alpha_4(src)), uint(coverage)); + const quint32 alpha = BYTE_MUL(uint(alpha_4(src)), uint(coverage)); if (alpha) interpolate_pixel_4(dest, src, alpha); length -= 4; diff --git a/src/gui/painting/qdrawhelper_p.h 
b/src/gui/painting/qdrawhelper_p.h
index 240859f1e..033c0414c 100644
--- a/src/gui/painting/qdrawhelper_p.h
+++ b/src/gui/painting/qdrawhelper_p.h
@@ -1556,20 +1556,6 @@ inline void qt_memconvert(qrgb666 *dest, const quint32 *src, int count)
 }
 #endif // Q_BYTE_ORDER
 
-template <class T>
-inline void qt_rectcopy(T *dest, const T *src,
-                        int x, int y, int width, int height,
-                        int dstStride, int srcStride)
-{
-    char *d = (char*)(dest + x) + y * dstStride;
-    const char *s = (const char*)(src);
-    for (int i = 0; i < height; ++i) {
-        ::memcpy(d, s, width * sizeof(T));
-        d += dstStride;
-        s += srcStride;
-    }
-}
-
 template <class DST, class SRC>
 inline void qt_rectconvert(DST *dest, const SRC *src,
                            int x, int y, int width, int height,
@@ -1590,7 +1576,13 @@ inline void qt_rectconvert(DST *dest, const SRC *src,
                                int x, int y, int width, int height,        \
                                int dstStride, int srcStride)               \
     {                                                                      \
-        qt_rectcopy(dest, src, x, y, width, height, dstStride, srcStride); \
+        char *d = (char*)(dest + x) + y * dstStride;                       \
+        const char *s = (const char*)(src);                                \
+        for (int i = 0; i < height; ++i) {                                 \
+            ::memcpy(d, s, width * sizeof(T));                             \
+            d += dstStride;                                                \
+            s += srcStride;                                                \
+        }                                                                  \
     }
 
 QT_RECTCONVERT_TRIVIAL_IMPL(quint32)
@@ -1608,14 +1600,6 @@ QT_RECTCONVERT_TRIVIAL_IMPL(qrgb444)
 
 static inline int qt_div_255(int x) { return (x + (x>>8) + 0x80) >> 8; }
 
-static inline QRgb qConvertRgb16To32(uint c)
-{
-    return 0xff000000
-        | ((((c) << 3) & 0xf8) | (((c) >> 2) & 0x7))
-        | ((((c) << 5) & 0xfc00) | (((c) >> 1) & 0x300))
-        | ((((c) << 8) & 0xf80000) | (((c) << 3) & 0x70000));
-}
-
 static const uint qt_bayer_matrix[16][16] = {
   { 0x1, 0xc0, 0x30, 0xf0, 0xc, 0xcc, 0x3c, 0xfc,
     0x3, 0xc3, 0x33, 0xf3, 0xf, 0xcf, 0x3f, 0xff},