Column schema (name, type, observed range or class count):

author             int64           658 .. 755k
date               stringlengths   19 .. 19
timezone           int64           -46,800 .. 43.2k
hash               stringlengths   40 .. 40
message            stringlengths   5 .. 490
mods               list
language           stringclasses   20 values
license            stringclasses   3 values
repo               stringlengths   5 .. 68
original_message   stringlengths   12 .. 491
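For readers consuming these rows programmatically, a minimal C++ sketch of one record is shown below. The struct mirrors the column schema above; the FileMod field names are assumed from the "mods" entries in the rows that follow, not taken from any official loader.

```cpp
#include <cstdint>
#include <string>
#include <vector>

// One file-level modification inside a commit. Field names are assumed from
// the "mods" entries in the rows below, not from any official loader.
struct FileMod
{
    std::string change_type;  // e.g. "MODIFY"
    std::string old_path;
    std::string new_path;
    std::string diff;         // unified diff text
};

// One dataset row, mirroring the column schema above.
struct CommitRecord
{
    int64_t author;                // numeric author id
    std::string date;              // "DD.MM.YYYY HH:MM:SS" (always 19 chars)
    int64_t timezone;              // UTC offset, likely seconds (range -46,800 .. 43.2k)
    std::string hash;              // 40-character commit hash
    std::string message;           // normalized commit message
    std::vector<FileMod> mods;     // per-file changes
    std::string language;          // one of 20 language classes
    std::string license;           // one of 3 license classes
    std::string repo;              // "owner/name"
    std::string original_message;  // raw commit message
};
```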
author: 61,745
date: 10.03.2022 08:39:25
timezone: 0
hash: 969589d53bc36adff311af14c0a8489630f45247
message: Compact duplicate encodings when in SELF_DECOMPRESS mode
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -1132,6 +1132,7 @@ void init_block_size_descriptor(\nunsigned int y_texels,\nunsigned int z_texels,\nbool can_omit_modes,\n+ unsigned int partition_count_cutoff,\nfloat mode_cutoff,\nblock_size_descriptor& bsd\n) {\n@@ -1144,7 +1145,7 @@ void init_block_size_descriptor(\nconstruct_block_size_descriptor_2d(x_texels, y_texels, can_omit_modes, mode_cutoff, bsd);\n}\n- init_partition_tables(bsd);\n+ init_partition_tables(bsd, can_omit_modes, partition_count_cutoff);\n}\n/* See header for documentation. */\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -1202,14 +1202,7 @@ void compress_block(\nfloat block_is_la_scale = block_is_la ? 1.0f / 1.05f : 1.0f;\nbool block_skip_two_plane = false;\n-\n- // Default max partition, but +1 if only have 1 or 2 active components\nint max_partitions = ctx.config.tune_partition_count_limit;\n- if (block_is_l || block_is_la)\n- {\n- max_partitions = astc::min(max_partitions + 1, 4);\n- }\n-\n#if defined(ASTCENC_DIAGNOSTICS)\n// Do this early in diagnostic builds so we can dump uniform metrics\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -716,10 +716,14 @@ astcenc_error astcenc_context_alloc(\n}\nbsd = new block_size_descriptor;\n+ ctx->bsd = bsd;\n+\nbool can_omit_modes = config.flags & ASTCENC_FLG_SELF_DECOMPRESS_ONLY;\ninit_block_size_descriptor(config.block_x, config.block_y, config.block_z,\n- can_omit_modes, static_cast<float>(config.tune_block_mode_limit) / 100.0f, *bsd);\n- ctx->bsd = bsd;\n+ can_omit_modes,\n+ config.tune_partition_count_limit,\n+ static_cast<float>(config.tune_block_mode_limit) / 100.0f,\n+ *bsd);\n#if !defined(ASTCENC_DECOMPRESS_ONLY)\n// Do setup only needed by compression\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1381,7 +1381,8 @@ struct astcenc_context\n* @param x_texels The number of texels in the block X dimension.\n* @param y_texels The number of texels in the block Y dimension.\n* @param z_texels The number of texels in the block Z dimension.\n- * @param can_omit_modes Can we discard modes that astcenc won't use, even if legal?\n+ * @param can_omit_modes Can we discard modes and partitionings that astcenc won't use?\n+ * @param partition_count_cutoff The partition count cutoff to use, if we can omit partitionings.\n* @param mode_cutoff The block mode percentile cutoff [0-1].\n* @param[out] bsd The descriptor to initialize.\n*/\n@@ -1390,6 +1391,7 @@ void init_block_size_descriptor(\nunsigned int y_texels,\nunsigned int z_texels,\nbool can_omit_modes,\n+ unsigned int partition_count_cutoff,\nfloat mode_cutoff,\nblock_size_descriptor& bsd);\n@@ -1408,9 +1410,13 @@ void term_block_size_descriptor(\n* calling this function.\n*\n* @param[out] bsd The block size information structure to populate.\n+ * @param can_omit_partitionings True if we can we drop partitionings that astcenc won't use.\n+ * @param partition_count_cutoff The partition count cutoff to use, if we can omit partitionings.\n*/\nvoid init_partition_tables(\n- block_size_descriptor& bsd);\n+ block_size_descriptor& bsd,\n+ bool can_omit_partitionings,\n+ unsigned int partition_count_cutoff);\n/**\n* @brief Get the percentile table for 2D block modes.\n" }, { "change_type": 
"MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "* @param[out] bit_pattern The output bit pattern representation.\n*/\nstatic void generate_canonical_partitioning(\n- int texel_count,\n+ unsigned int texel_count,\nconst uint8_t* partition_of_texel,\nuint64_t bit_pattern[7]\n) {\n// Clear the pattern\n- for (int i = 0; i < 7; i++)\n+ for (unsigned int i = 0; i < 7; i++)\n{\nbit_pattern[i] = 0;\n}\n@@ -54,10 +54,9 @@ static void generate_canonical_partitioning(\nmapped_index[i] = -1;\n}\n- for (int i = 0; i < texel_count; i++)\n+ for (unsigned int i = 0; i < texel_count; i++)\n{\nint index = partition_of_texel[i];\n-\nif (mapped_index[index] < 0)\n{\nmapped_index[index] = map_weight_count++;\n@@ -86,37 +85,6 @@ static bool compare_canonical_partitionings(\n(part1[6] == part2[6]);\n}\n-/**\n- * @brief Compare all partition patterns and remove duplicates.\n- *\n- * The partitioning algorithm uses a hash function for texel assignment that can produce partitions\n- * which have the same texel assignment groupings. It is only useful for the compressor to test one\n- * of each, so we mark duplicates as invalid.\n- *\n-* @param bit_patterns The scratch memory for the bit patterns.\n- * @param texel_count The first canonical bit pattern to check.\n- * @param[in,out] pt The table of partitioning information entries.\n- */\n-static void remove_duplicate_partitionings(\n- uint64_t* bit_patterns,\n- int texel_count,\n- partition_info pt[BLOCK_MAX_PARTITIONINGS]\n-) {\n- for (unsigned int i = 0; i < BLOCK_MAX_PARTITIONINGS; i++)\n- {\n- generate_canonical_partitioning(texel_count, pt[i].partition_of_texel, bit_patterns + i * 7);\n-\n- for (unsigned int j = 0; j < i; j++)\n- {\n- if (compare_canonical_partitionings(bit_patterns + 7 * i, bit_patterns + 7 * j))\n- {\n- pt[i].partition_count = 0;\n- break;\n- }\n- }\n- }\n-}\n-\n/**\n* @brief Hash function used for procedural partition assignment.\n*\n@@ -363,18 +331,46 @@ static bool generate_one_partition_info_entry(\nstatic void build_partition_table_for_one_partition_count(\nblock_size_descriptor& bsd,\n+ bool can_omit_partitionings,\n+ unsigned int partition_count_cutoff,\nunsigned int partition_count,\n- partition_info* ptab\n+ partition_info* ptab,\n+ uint64_t* canonical_patterns\n) {\nunsigned int next_index = 0;\nbsd.partitioning_count[partition_count - 1] = 0;\n+\n+ if (partition_count > partition_count_cutoff)\n+ {\n+ return;\n+ }\n+\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONINGS; i++)\n{\nbool keep = generate_one_partition_info_entry(bsd, partition_count, i, ptab[next_index]);\n+ if (!keep && can_omit_partitionings)\n+ {\n+ bsd.partitioning_packed_index[partition_count - 1][i] = BLOCK_BAD_PARTITIONING;\n+ continue;\n+ }\n+\n+ generate_canonical_partitioning(bsd.texel_count, ptab[next_index].partition_of_texel, canonical_patterns + next_index * 7);\n+ keep = true;\n+ for (unsigned int j = 0; j < next_index; j++)\n+ {\n+ bool match = compare_canonical_partitionings(canonical_patterns + 7 * next_index, canonical_patterns + 7 * j);\n+ if (match)\n+ {\n+ ptab[next_index].partition_count = 0;\n+ keep = !can_omit_partitionings;\n+ break;\n+ }\n+ }\n+\nif (keep)\n{\nbsd.partitioning_packed_index[partition_count - 1][i] = next_index;\n- bsd.partitioning_count[partition_count - 1]++;\n+ bsd.partitioning_count[partition_count - 1] = next_index + 1;\nnext_index++;\n}\nelse\n@@ -386,7 +382,9 @@ static void build_partition_table_for_one_partition_count(\n/* See header for 
documentation. */\nvoid init_partition_tables(\n- block_size_descriptor& bsd\n+ block_size_descriptor& bsd,\n+ bool can_omit_partitionings,\n+ unsigned int partition_count_cutoff\n) {\npartition_info* par_tab2 = bsd.partitionings;\npartition_info* par_tab3 = par_tab2 + BLOCK_MAX_PARTITIONINGS;\n@@ -397,15 +395,11 @@ void init_partition_tables(\nbsd.partitioning_count[0] = 1;\nbsd.partitioning_packed_index[0][0] = 0;\n- build_partition_table_for_one_partition_count(bsd, 2, par_tab2);\n- build_partition_table_for_one_partition_count(bsd, 3, par_tab3);\n- build_partition_table_for_one_partition_count(bsd, 4, par_tab4);\n-\n- uint64_t* bit_patterns = new uint64_t[BLOCK_MAX_PARTITIONINGS * 7];\n+ uint64_t* canonical_patterns = new uint64_t[BLOCK_MAX_PARTITIONINGS * 7];\n- remove_duplicate_partitionings(bit_patterns, bsd.texel_count, par_tab2);\n- remove_duplicate_partitionings(bit_patterns, bsd.texel_count, par_tab3);\n- remove_duplicate_partitionings(bit_patterns, bsd.texel_count, par_tab4);\n+ build_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 2, par_tab2, canonical_patterns);\n+ build_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 3, par_tab3, canonical_patterns);\n+ build_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 4, par_tab4, canonical_patterns);\n- delete[] bit_patterns;\n+ delete[] canonical_patterns;\n}\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Compact duplicate encodings when in SELF_DECOMPRESS mode
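The commit above folds duplicate detection into build_partition_table_for_one_partition_count(): each partitioning gets a canonical bit-pattern signature, and any later entry whose signature matches an earlier one is either marked with partition_count 0 or, when modes can be omitted (SELF_DECOMPRESS_ONLY), dropped from the packed index entirely. The sketch below illustrates the canonical-signature idea only; the 2-bits-per-texel packing and 7-word buffer size are assumptions about the layout, not a copy of astcenc's generate_canonical_partitioning().

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>

// Renumber partition ids in order of first appearance, so two partitionings
// with identical texel groupings produce the same signature even if the
// procedural seed assigned different ids to each group.
static void make_canonical(unsigned texel_count,
                           const uint8_t* partition_of_texel,
                           std::array<uint64_t, 7>& pattern)
{
    pattern.fill(0);

    int mapped[4] { -1, -1, -1, -1 };
    int next_id = 0;

    for (unsigned i = 0; i < texel_count; i++)
    {
        int p = partition_of_texel[i];
        if (mapped[p] < 0)
        {
            mapped[p] = next_id++;
        }

        // Assumed packing: 2 bits per texel holding the remapped id.
        unsigned bit = 2 * i;
        pattern[bit / 64] |= static_cast<uint64_t>(mapped[p]) << (bit % 64);
    }
}

// A candidate is a duplicate if its signature matches any earlier one.
static bool is_duplicate(const std::array<uint64_t, 7>& candidate,
                         const std::vector<std::array<uint64_t, 7>>& seen)
{
    for (const auto& s : seen)
    {
        if (std::memcmp(candidate.data(), s.data(), sizeof(candidate)) == 0)
        {
            return true;
        }
    }
    return false;
}
```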
author: 61,745
date: 10.03.2022 19:08:13
timezone: 0
hash: 9896e6aa987a3873e6cf2d7386fb88ac473b6754
message: Don't remove tables above max partition count in decompress configs
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -340,7 +340,7 @@ static void build_partition_table_for_one_partition_count(\nunsigned int next_index = 0;\nbsd.partitioning_count[partition_count - 1] = 0;\n- if (partition_count > partition_count_cutoff)\n+ if (can_omit_partitionings && (partition_count > partition_count_cutoff))\n{\nreturn;\n}\n@@ -348,7 +348,7 @@ static void build_partition_table_for_one_partition_count(\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONINGS; i++)\n{\nbool keep = generate_one_partition_info_entry(bsd, partition_count, i, ptab[next_index]);\n- if (!keep && can_omit_partitionings)\n+ if (can_omit_partitionings && !keep)\n{\nbsd.partitioning_packed_index[partition_count - 1][i] = BLOCK_BAD_PARTITIONING;\ncontinue;\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Don't remove tables above max partition count in decompress configs
author: 61,745
date: 10.03.2022 20:17:38
timezone: 0
hash: 6fec30c25aea4de3adc0f3a1e51dc1f4dd7f8c6a
message: Only store remap tables for 2-4 partitions
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -712,9 +712,12 @@ struct block_size_descriptor\n/** @brief The active partition tables, stored in low indices per-count. */\npartition_info partitionings[(3 * BLOCK_MAX_PARTITIONINGS) + 1];\n- /** @brief The packed partition table array index, or @c BLOCK_BAD_PARTITIONING if not active. */\n- // TODO: Index 0 in this contains nothing ...\n- uint16_t partitioning_packed_index[4][BLOCK_MAX_PARTITIONINGS];\n+ /**\n+ * @brief The packed partition table array index, or @c BLOCK_BAD_PARTITIONING if not active.\n+ *\n+ * Indexed by partition_count - 2, containing 2, 3 and 4 partitions.\n+ */\n+ uint16_t partitioning_packed_index[3][BLOCK_MAX_PARTITIONINGS];\n/** @brief The active texels for k-means partition selection. */\nuint8_t kmeans_texels[BLOCK_MAX_KMEANS_TEXELS];\n@@ -796,7 +799,12 @@ struct block_size_descriptor\n*/\nconst partition_info& get_partition_info(unsigned int partition_count, unsigned int index) const\n{\n- unsigned int packed_index = this->partitioning_packed_index[partition_count - 1][index];\n+ unsigned int packed_index = 0;\n+ if (partition_count >= 2)\n+ {\n+ packed_index = this->partitioning_packed_index[partition_count - 2][index];\n+ }\n+\nassert(packed_index != BLOCK_BAD_PARTITIONING && packed_index < this->partitioning_count[partition_count - 1]);\nauto& result = get_partition_table(partition_count)[packed_index];\nassert(index == result.partition_index);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -350,7 +350,7 @@ static void build_partition_table_for_one_partition_count(\nbool keep = generate_one_partition_info_entry(bsd, partition_count, i, ptab[next_index]);\nif (can_omit_partitionings && !keep)\n{\n- bsd.partitioning_packed_index[partition_count - 1][i] = BLOCK_BAD_PARTITIONING;\n+ bsd.partitioning_packed_index[partition_count - 2][i] = BLOCK_BAD_PARTITIONING;\ncontinue;\n}\n@@ -369,13 +369,13 @@ static void build_partition_table_for_one_partition_count(\nif (keep)\n{\n- bsd.partitioning_packed_index[partition_count - 1][i] = next_index;\n+ bsd.partitioning_packed_index[partition_count - 2][i] = next_index;\nbsd.partitioning_count[partition_count - 1] = next_index + 1;\nnext_index++;\n}\nelse\n{\n- bsd.partitioning_packed_index[partition_count - 1][i] = BLOCK_BAD_PARTITIONING;\n+ bsd.partitioning_packed_index[partition_count - 2][i] = BLOCK_BAD_PARTITIONING;\n}\n}\n}\n@@ -393,10 +393,8 @@ void init_partition_tables(\ngenerate_one_partition_info_entry(bsd, 1, 0, *par_tab1);\nbsd.partitioning_count[0] = 1;\n- bsd.partitioning_packed_index[0][0] = 0;\nuint64_t* canonical_patterns = new uint64_t[BLOCK_MAX_PARTITIONINGS * 7];\n-\nbuild_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 2, par_tab2, canonical_patterns);\nbuild_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 3, par_tab3, canonical_patterns);\nbuild_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 4, par_tab4, canonical_patterns);\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Only store remap tables for 2-4 partitions
author: 61,745
date: 10.03.2022 20:24:11
timezone: 0
hash: 38a11cb9848457a2b7e0bc25cfb3385935d172bd
message: Move normal map max partition boost to config init
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -618,6 +618,11 @@ astcenc_error astcenc_config_init(\nif (flags & ASTCENC_FLG_MAP_NORMAL)\n{\n+ // Normal map encoding uses L+A blocks, so allow one more partitioning\n+ // than normal. We need need fewer bits for endpoints, so more likely\n+ // to be able to use more partitions than an RGB/RGBA block\n+ config.tune_partition_count_limit = astc::min(config.tune_partition_count_limit + 1u, 4u);\n+\nconfig.cw_g_weight = 0.0f;\nconfig.cw_b_weight = 0.0f;\nconfig.tune_2_partition_early_out_limit_factor *= 1.5f;\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Move normal map max partition boost to config init
author: 61,745
date: 10.03.2022 21:47:33
timezone: 0
hash: 621b789c14a6d29be7c08a2a0a605322f9535f00
message: Optimize memory layout for partition coverage masks
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -360,26 +360,29 @@ static void count_partition_mismatch_bits(\nconst uint64_t bitmaps[BLOCK_MAX_PARTITIONS],\nunsigned int mismatch_counts[BLOCK_MAX_PARTITIONINGS]\n) {\n- const auto* pt = bsd.get_partition_table(partition_count);\n-\n- // Function pointer dispatch table\n- const mismatch_dispatch dispatch[3] {\n- partition_mismatch2,\n- partition_mismatch3,\n- partition_mismatch4\n- };\n-\n+ if (partition_count == 2)\n+ {\nfor (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n{\n- // TODO: Shouldn't need this once we squash out dupes ...\n- int bitcount = 255;\n- if (pt->partition_count == partition_count)\n+ int bitcount = partition_mismatch2(bitmaps, bsd.coverage_bitmaps_2[i]);\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n+ }\n+ }\n+ else if (partition_count == 3)\n+ {\n+ for (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n{\n- bitcount = dispatch[partition_count - 2](bitmaps, pt->coverage_bitmaps);\n+ int bitcount = partition_mismatch3(bitmaps, bsd.coverage_bitmaps_3[i]);\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n+ }\n+ }\n+ else\n+ {\n+ for (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n+ {\n+ int bitcount = partition_mismatch4(bitmaps, bsd.coverage_bitmaps_4[i]);\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n}\n-\n- mismatch_counts[i] = bitcount;\n- pt++;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -525,9 +525,6 @@ struct partition_info\n/** @brief The list of texels in each partition. */\nuint8_t texels_of_partition[BLOCK_MAX_PARTITIONS][BLOCK_MAX_TEXELS];\n-\n- /** @brief The canonical partition coverage pattern used during block partition search. */\n- uint64_t coverage_bitmaps[BLOCK_MAX_PARTITIONS];\n};\n/**\n@@ -543,6 +540,9 @@ struct partition_info\n*/\nstruct decimation_info\n{\n+ // TODO: These structures are large. Any partitioning opportunities to\n+ // improve caching and reduce miss rates?\n+\n/** @brief The total number of texels in the block. */\nuint8_t texel_count;\n@@ -722,6 +722,34 @@ struct block_size_descriptor\n/** @brief The active texels for k-means partition selection. 
*/\nuint8_t kmeans_texels[BLOCK_MAX_KMEANS_TEXELS];\n+ /**\n+ * @brief Is 0 if this partition is valid for compression 255 otherwise.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint8_t partitioning_valid[BLOCK_MAX_PARTITIONINGS];\n+\n+ /**\n+ * @brief The canonical 2 partition coverage pattern used during block partition search.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint64_t coverage_bitmaps_2[BLOCK_MAX_PARTITIONINGS][2];\n+\n+ /**\n+ * @brief The canonical 3 partition coverage pattern used during block partition search.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint64_t coverage_bitmaps_3[BLOCK_MAX_PARTITIONINGS][3];\n+\n+ /**\n+ * @brief The canonical 4 partition coverage pattern used during block partition search.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint64_t coverage_bitmaps_4[BLOCK_MAX_PARTITIONINGS][4];\n+\n/**\n* @brief Get the block mode structure for index @c block_mode.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -246,17 +246,19 @@ static uint8_t select_partition(\n/**\n* @brief Generate a single partition info structure.\n*\n- * @param bsd The block size information.\n+ * @param[out] bsd The block size information.\n* @param partition_count The partition count of this partitioning.\n- * @param partition_index The partition index / see of this partitioning.\n+ * @param partition_index The partition index / seed of this partitioning.\n+ * @param partition_remap_index The remapped partition index of this partitioning.\n* @param[out] pi The partition info structure to populate.\n*\n* @return True if this is a useful partition index, False if we can skip it.\n*/\nstatic bool generate_one_partition_info_entry(\n- const block_size_descriptor& bsd,\n+ block_size_descriptor& bsd,\nunsigned int partition_count,\nunsigned int partition_index,\n+ unsigned int partition_remap_index,\npartition_info& pi\n) {\nint texels_per_block = bsd.texel_count;\n@@ -291,6 +293,7 @@ static bool generate_one_partition_info_entry(\n}\n}\n+ // Populate the actual procedural partition count\nif (counts[0] == 0)\n{\npi.partition_count = 0;\n@@ -312,21 +315,48 @@ static bool generate_one_partition_info_entry(\npi.partition_count = 4;\n}\n+ // Populate the partition index\n+ pi.partition_index = partition_index;\n+\n+ // Populate the coverage bitmaps for 2/3/4 partitions\n+ uint64_t* bitmaps { nullptr };\n+ if (partition_count == 2)\n+ {\n+ bitmaps = bsd.coverage_bitmaps_2[partition_remap_index];\n+ }\n+ else if (partition_count == 3)\n+ {\n+ bitmaps = bsd.coverage_bitmaps_3[partition_remap_index];\n+ }\n+ else if (partition_count == 4)\n+ {\n+ bitmaps = bsd.coverage_bitmaps_4[partition_remap_index];\n+ }\n+\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONS; i++)\n{\npi.partition_texel_count[i] = static_cast<uint8_t>(counts[i]);\n- pi.coverage_bitmaps[i] = 0ULL;\n+ }\n+\n+ if (bitmaps)\n+ {\n+ for (unsigned int i = 0; i < partition_count; i++)\n+ {\n+ bitmaps[i] = 0ULL;\n}\nunsigned int texels_to_process = astc::min(bsd.texel_count, BLOCK_MAX_KMEANS_TEXELS);\nfor (unsigned int i = 0; i < texels_to_process; i++)\n{\nunsigned int idx = bsd.kmeans_texels[i];\n- pi.coverage_bitmaps[pi.partition_of_texel[idx]] |= 1ULL << i;\n+ bitmaps[pi.partition_of_texel[idx]] |= 1ULL << i;\n+ }\n}\n- pi.partition_index = partition_index;\n- return pi.partition_count == partition_count;\n+ // Populate the validity mask\n+ 
bool valid = pi.partition_count == partition_count;\n+ bsd.partitioning_valid[partition_remap_index] = valid ? 0 : 255;\n+ return valid;\n}\nstatic void build_partition_table_for_one_partition_count(\n@@ -347,7 +377,7 @@ static void build_partition_table_for_one_partition_count(\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONINGS; i++)\n{\n- bool keep = generate_one_partition_info_entry(bsd, partition_count, i, ptab[next_index]);\n+ bool keep = generate_one_partition_info_entry(bsd, partition_count, i, next_index, ptab[next_index]);\nif (can_omit_partitionings && !keep)\n{\nbsd.partitioning_packed_index[partition_count - 2][i] = BLOCK_BAD_PARTITIONING;\n@@ -391,7 +421,7 @@ void init_partition_tables(\npartition_info* par_tab4 = par_tab3 + BLOCK_MAX_PARTITIONINGS;\npartition_info* par_tab1 = par_tab4 + BLOCK_MAX_PARTITIONINGS;\n- generate_one_partition_info_entry(bsd, 1, 0, *par_tab1);\n+ generate_one_partition_info_entry(bsd, 1, 0, 0, *par_tab1);\nbsd.partitioning_count[0] = 1;\nuint64_t* canonical_patterns = new uint64_t[BLOCK_MAX_PARTITIONINGS * 7];\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Optimize memory layout for partition coverage masks
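The commit above is a data-layout change: the per-partitioning coverage bitmaps (plus a validity byte) move out of the large partition_info struct into dense per-partition-count arrays on the block size descriptor, indexed by remapped index, so the mismatch-counting loop in the partition search touches small contiguous entries instead of striding through whole partition_info records. The before/after sketch below is simplified; the array bound, texel count, and mismatch2() helper are illustrative stand-ins for BLOCK_MAX_PARTITIONINGS, BLOCK_MAX_TEXELS, and partition_mismatch2().

```cpp
#include <algorithm>
#include <bit>
#include <cstdint>

constexpr unsigned MAX_PARTITIONINGS = 1024;  // illustrative bound

// Before: array-of-structs. Each entry is large, and the search loop only
// needs the coverage bitmaps, so most of every cache line fetched is wasted.
struct partition_info_aos
{
    uint8_t  partition_of_texel[216];
    uint8_t  texels_of_partition[4][216];
    uint64_t coverage_bitmaps[4];
};

// After: struct-of-arrays. The hot search data lives in dense arrays indexed
// by remapped partitioning index, keeping the working set small.
struct descriptor_soa
{
    uint8_t  partitioning_valid_2[MAX_PARTITIONINGS];
    uint64_t coverage_bitmaps_2[MAX_PARTITIONINGS][2];
    // ... equivalent _3 / _4 members elided ...
};

// Simplified 2-partition mismatch count: the two labels may be swapped
// between the assignments, so take the cheaper of the two pairings.
inline int mismatch2(const uint64_t a[2], const uint64_t b[2])
{
    int v1 = std::popcount(a[0] ^ b[0]) + std::popcount(a[1] ^ b[1]);
    int v2 = std::popcount(a[0] ^ b[1]) + std::popcount(a[1] ^ b[0]);
    return std::min(v1, v2);
}
```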
author: 61,745
date: 10.03.2022 22:13:13
timezone: 0
hash: 2ff7b8a4941895de11f962c59bb704b2d583349b
message: Use per-partition count validity arrays
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -365,7 +365,7 @@ static void count_partition_mismatch_bits(\nfor (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n{\nint bitcount = partition_mismatch2(bitmaps, bsd.coverage_bitmaps_2[i]);\n- mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_2[i]));\n}\n}\nelse if (partition_count == 3)\n@@ -373,7 +373,7 @@ static void count_partition_mismatch_bits(\nfor (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n{\nint bitcount = partition_mismatch3(bitmaps, bsd.coverage_bitmaps_3[i]);\n- mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_3[i]));\n}\n}\nelse\n@@ -381,7 +381,7 @@ static void count_partition_mismatch_bits(\nfor (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n{\nint bitcount = partition_mismatch4(bitmaps, bsd.coverage_bitmaps_4[i]);\n- mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid[i]));\n+ mismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_4[i]));\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -723,28 +723,42 @@ struct block_size_descriptor\nuint8_t kmeans_texels[BLOCK_MAX_KMEANS_TEXELS];\n/**\n- * @brief Is 0 if this partition is valid for compression 255 otherwise.\n+ * @brief Is 0 if this 2-partition is valid for compression 255 otherwise.\n*\n* Indexed by remapped index, not physical index.\n*/\n- uint8_t partitioning_valid[BLOCK_MAX_PARTITIONINGS];\n+ uint8_t partitioning_valid_2[BLOCK_MAX_PARTITIONINGS];\n/**\n- * @brief The canonical 2 partition coverage pattern used during block partition search.\n+ * @brief The canonical 2-partition coverage pattern used during block partition search.\n*\n* Indexed by remapped index, not physical index.\n*/\nuint64_t coverage_bitmaps_2[BLOCK_MAX_PARTITIONINGS][2];\n/**\n- * @brief The canonical 3 partition coverage pattern used during block partition search.\n+ * @brief Is 0 if this 3-partition is valid for compression 255 otherwise.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint8_t partitioning_valid_3[BLOCK_MAX_PARTITIONINGS];\n+\n+ /**\n+ * @brief The canonical 3-partition coverage pattern used during block partition search.\n*\n* Indexed by remapped index, not physical index.\n*/\nuint64_t coverage_bitmaps_3[BLOCK_MAX_PARTITIONINGS][3];\n/**\n- * @brief The canonical 4 partition coverage pattern used during block partition search.\n+ * @brief Is 0 if this 4-partition is valid for compression 255 otherwise.\n+ *\n+ * Indexed by remapped index, not physical index.\n+ */\n+ uint8_t partitioning_valid_4[BLOCK_MAX_PARTITIONINGS];\n+\n+ /**\n+ * @brief The canonical 4-partition coverage pattern used during block partition search.\n*\n* Indexed by remapped index, not physical index.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -320,17 +320,21 @@ static bool generate_one_partition_info_entry(\n// Populate the coverage bitmaps for 2/3/4 partitions\nuint64_t* bitmaps { nullptr };\n+ uint8_t* valids { 
nullptr };\nif (partition_count == 2)\n{\nbitmaps = bsd.coverage_bitmaps_2[partition_remap_index];\n+ valids = bsd.partitioning_valid_2;\n}\nelse if (partition_count == 3)\n{\nbitmaps = bsd.coverage_bitmaps_3[partition_remap_index];\n+ valids = bsd.partitioning_valid_3;\n}\nelse if (partition_count == 4)\n{\nbitmaps = bsd.coverage_bitmaps_4[partition_remap_index];\n+ valids = bsd.partitioning_valid_4;\n}\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONS; i++)\n@@ -338,8 +342,14 @@ static bool generate_one_partition_info_entry(\npi.partition_texel_count[i] = static_cast<uint8_t>(counts[i]);\n}\n+ // Valid partitionings have texels in all of the requested partitions\n+ bool valid = pi.partition_count == partition_count;\n+\nif (bitmaps)\n{\n+ // Populate the bitmap validity mask\n+ valids[partition_remap_index] = valid ? 0 : 255;\n+\nfor (unsigned int i = 0; i < partition_count; i++)\n{\nbitmaps[i] = 0ULL;\n@@ -353,9 +363,6 @@ static bool generate_one_partition_info_entry(\n}\n}\n- // Populate the validity mask\n- bool valid = pi.partition_count == partition_count;\n- bsd.partitioning_valid[partition_remap_index] = valid ? 0 : 255;\nreturn valid;\n}\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Use per-partition count validity arrays
author: 61,745
date: 10.03.2022 22:44:15
timezone: 0
hash: 6a7717eba750bac0f60583b6c4910e16119e3039
message: Improve comment for remaining TODO
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -529,7 +529,10 @@ void find_best_partition_candidates(\nunsigned int partition = partition_sequence[i];\nconst auto& pi = bsd.get_raw_partition_info(partition_count, partition);\n- // TODO: This escape shouldn't be needed eventually ...\n+ // TODO: This escape shouldn't really be needed. We should return\n+ // the number of blocks which have usable (!= 255) mismatch count\n+ // from compute_kmeans_partition_ordering and use that as the upper\n+ // loop limit.\nunsigned int bk_partition_count = pi.partition_count;\nif (bk_partition_count < partition_count)\n{\n@@ -629,7 +632,10 @@ void find_best_partition_candidates(\nunsigned int partition = partition_sequence[i];\nconst auto& pi = bsd.get_raw_partition_info(partition_count, partition);\n- // TODO: This escape shouldn't be needed eventually ...\n+ // TODO: This escape shouldn't really be needed. We should return\n+ // the number of blocks which have usable (!= 255) mismatch count\n+ // from compute_kmeans_partition_ordering and use that as the upper\n+ // loop limit.\nunsigned int bk_partition_count = pi.partition_count;\nif (bk_partition_count < partition_count)\n{\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Improve comment for remaining TODO
author: 61,745
date: 11.03.2022 21:21:54
timezone: 0
hash: ac6467c1d2b2135c3882574044bb44bc293ac60b
message: Move to static resource allocation in context structure
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -768,8 +768,8 @@ static int construct_dt_entry_2d(\nbool try_2planes = (2 * weight_count) <= BLOCK_MAX_WEIGHTS;\n- decimation_info *di = aligned_malloc<decimation_info>(sizeof(decimation_info), ASTCENC_VECALIGN);\n- init_decimation_info_2d(x_texels, y_texels, x_weights, y_weights, *di, wb);\n+ decimation_info& di = bsd.decimation_tables[dm_index];\n+ init_decimation_info_2d(x_texels, y_texels, x_weights, y_weights, di, wb);\nint maxprec_1plane = -1;\nint maxprec_2planes = -1;\n@@ -799,8 +799,6 @@ static int construct_dt_entry_2d(\n// Default to not enabled - we'll populate these based on active block modes\nbsd.decimation_modes[dm_index].percentile_hit = false;\n- bsd.decimation_tables[dm_index] = di;\n-\nbsd.decimation_mode_count++;\nreturn dm_index;\n}\n@@ -965,7 +963,6 @@ static void construct_block_size_descriptor_2d(\nbsd.decimation_modes[i].maxprec_1plane = -1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\nbsd.decimation_modes[i].percentile_hit = false;\n- bsd.decimation_tables[i] = nullptr;\n}\n// Determine the texels to use for kmeans clustering.\n@@ -1022,9 +1019,9 @@ static void construct_block_size_descriptor_3d(\ncontinue;\n}\n- decimation_info *di = aligned_malloc<decimation_info>(sizeof(decimation_info), ASTCENC_VECALIGN);\n+ decimation_info& di = bsd.decimation_tables[decimation_mode_count];\ndecimation_mode_index[z_weights * 64 + y_weights * 8 + x_weights] = decimation_mode_count;\n- init_decimation_info_3d(x_texels, y_texels, z_texels, x_weights, y_weights, z_weights, *di, *wb);\n+ init_decimation_info_3d(x_texels, y_texels, z_texels, x_weights, y_weights, z_weights, di, *wb);\nint maxprec_1plane = -1;\nint maxprec_2planes = -1;\n@@ -1051,7 +1048,6 @@ static void construct_block_size_descriptor_3d(\nbsd.decimation_modes[decimation_mode_count].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\nbsd.decimation_modes[decimation_mode_count].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\nbsd.decimation_modes[decimation_mode_count].percentile_hit = false;\n- bsd.decimation_tables[decimation_mode_count] = di;\ndecimation_mode_count++;\n}\n}\n@@ -1063,7 +1059,6 @@ static void construct_block_size_descriptor_3d(\nbsd.decimation_modes[i].maxprec_1plane = -1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\nbsd.decimation_modes[i].percentile_hit = false;\n- bsd.decimation_tables[i] = nullptr;\n}\nbsd.decimation_mode_count = decimation_mode_count;\n@@ -1147,13 +1142,3 @@ void init_block_size_descriptor(\ninit_partition_tables(bsd, can_omit_modes, partition_count_cutoff);\n}\n-\n-/* See header for documentation. 
*/\n-void term_block_size_descriptor(\n- block_size_descriptor& bsd\n-) {\n- for (unsigned int i = 0; i < bsd.decimation_mode_count; i++)\n- {\n- aligned_free<const decimation_info>(bsd.decimation_tables[i]);\n- }\n-}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -1185,7 +1185,7 @@ void compress_block(\n{\nastcenc_profile decode_mode = ctx.config.profile;\nsymbolic_compressed_block scb;\n- const block_size_descriptor* bsd = ctx.bsd;\n+ const block_size_descriptor& bsd = ctx.bsd;\nfloat lowest_correl;\nTRACE_NODE(node0, \"block\");\n@@ -1246,12 +1246,12 @@ void compress_block(\ntrace_add_data(\"exit\", \"quality hit\");\n- symbolic_to_physical(*bsd, scb, pcb);\n+ symbolic_to_physical(bsd, scb, pcb);\nreturn;\n}\n#if !defined(ASTCENC_DIAGNOSTICS)\n- float error_weight_sum = hadd_s(blk.channel_weight) * bsd->texel_count;\n+ float error_weight_sum = hadd_s(blk.channel_weight) * bsd.texel_count;\nfloat error_threshold = ctx.config.tune_db_limit\n* error_weight_sum\n* block_is_l_scale\n@@ -1289,7 +1289,7 @@ void compress_block(\n// Only enable MODE0 fast path (trial 0) if 2D and more than 25 texels\nint start_trial = 1;\n- if ((bsd->texel_count >= TUNE_MIN_TEXELS_MODE0_FASTPATH) && (bsd->zdim == 1))\n+ if ((bsd.texel_count >= TUNE_MIN_TEXELS_MODE0_FASTPATH) && (bsd.zdim == 1))\n{\nstart_trial = 0;\n}\n@@ -1302,7 +1302,7 @@ void compress_block(\ntrace_add_data(\"search_mode\", i);\nfloat errorval = compress_symbolic_block_for_partition_1plane(\n- ctx.config, *bsd, blk, i == 0,\n+ ctx.config, bsd, blk, i == 0,\nerror_threshold * errorval_mult[i] * errorval_overshoot,\n1, 0, scb, tmpbuf);\n@@ -1315,7 +1315,7 @@ void compress_block(\n}\n#if !defined(ASTCENC_DIAGNOSTICS)\n- lowest_correl = prepare_block_statistics(bsd->texel_count, blk);\n+ lowest_correl = prepare_block_statistics(bsd.texel_count, blk);\n#endif\nblock_skip_two_plane = lowest_correl > ctx.config.tune_2_plane_early_out_limit_correlation;\n@@ -1348,7 +1348,7 @@ void compress_block(\n}\nfloat errorval = compress_symbolic_block_for_partition_2planes(\n- ctx.config, *bsd, blk, error_threshold * errorval_overshoot,\n+ ctx.config, bsd, blk, error_threshold * errorval_overshoot,\ni, scb, tmpbuf);\n// If attempting two planes is much worse than the best one plane result\n@@ -1370,7 +1370,7 @@ void compress_block(\n{\nunsigned int partition_indices[2] { 0 };\n- find_best_partition_candidates(*bsd, blk, partition_count,\n+ find_best_partition_candidates(bsd, blk, partition_count,\nctx.config.tune_partition_index_limit,\npartition_indices);\n@@ -1383,7 +1383,7 @@ void compress_block(\ntrace_add_data(\"search_mode\", i);\nfloat errorval = compress_symbolic_block_for_partition_1plane(\n- ctx.config, *bsd, blk, false,\n+ ctx.config, bsd, blk, false,\nerror_threshold * errorval_overshoot,\npartition_count, partition_indices[i],\nscb, tmpbuf);\n@@ -1432,7 +1432,7 @@ END_OF_TESTS:\n}\n// Compress to a physical block\n- symbolic_to_physical(*bsd, scb, pcb);\n+ symbolic_to_physical(bsd, scb, pcb);\n}\n#endif\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -342,7 +342,7 @@ float compute_symbolic_block_difference_2plane(\n// Get the appropriate block descriptor\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\n- const decimation_info& di = *(bsd.decimation_tables[bm.decimation_mode]);\n+ const decimation_info& di = 
bsd.get_decimation_info(bm.decimation_mode);\n// Unquantize and undecimate the weights\nint plane1_weights[BLOCK_MAX_TEXELS];\n@@ -439,7 +439,7 @@ float compute_symbolic_block_difference_1plane(\n// Get the appropriate block descriptor\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\n- const decimation_info& di = *(bsd.decimation_tables[bm.decimation_mode]);\n+ const decimation_info& di = bsd.get_decimation_info(bm.decimation_mode);\n// Unquantize and undecimate the weights\nint plane1_weights[BLOCK_MAX_TEXELS];\n@@ -532,7 +532,7 @@ float compute_symbolic_block_difference_1plane_1partition(\n// Get the appropriate block descriptor\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\n- const decimation_info& di = *(bsd.decimation_tables[bm.decimation_mode]);\n+ const decimation_info& di = bsd.get_decimation_info(bm.decimation_mode);\n// Unquantize and undecimate the weights\nalignas(ASTCENC_VECALIGN) int plane1_weights[BLOCK_MAX_TEXELS];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -675,8 +675,6 @@ astcenc_error astcenc_context_alloc(\nastcenc_context** context\n) {\nastcenc_error status;\n- astcenc_context* ctx = nullptr;\n- block_size_descriptor* bsd = nullptr;\nconst astcenc_config& config = *configp;\nstatus = validate_cpu_float();\n@@ -704,7 +702,7 @@ astcenc_error astcenc_context_alloc(\n}\n#endif\n- ctx = new astcenc_context;\n+ astcenc_context* ctx = aligned_malloc<astcenc_context>(sizeof(astcenc_context), ASTCENC_VECALIGN);\nctx->thread_count = thread_count;\nctx->config = config;\nctx->working_buffers = nullptr;\n@@ -720,15 +718,12 @@ astcenc_error astcenc_context_alloc(\nreturn status;\n}\n- bsd = new block_size_descriptor;\n- ctx->bsd = bsd;\n-\nbool can_omit_modes = config.flags & ASTCENC_FLG_SELF_DECOMPRESS_ONLY;\ninit_block_size_descriptor(config.block_x, config.block_y, config.block_z,\ncan_omit_modes,\nconfig.tune_partition_count_limit,\nstatic_cast<float>(config.tune_block_mode_limit) / 100.0f,\n- *bsd);\n+ ctx->bsd);\n#if !defined(ASTCENC_DECOMPRESS_ONLY)\n// Do setup only needed by compression\n@@ -750,8 +745,6 @@ astcenc_error astcenc_context_alloc(\n\"compression_working_buffers size must be multiple of vector alignment\");\nif (!ctx->working_buffers)\n{\n- term_block_size_descriptor(*bsd);\n- delete bsd;\ndelete ctx;\n*context = nullptr;\nreturn ASTCENC_ERR_OUT_OF_MEM;\n@@ -787,12 +780,10 @@ void astcenc_context_free(\nif (ctx)\n{\naligned_free<compression_working_buffers>(ctx->working_buffers);\n- term_block_size_descriptor(*(ctx->bsd));\n#if defined(ASTCENC_DIAGNOSTICS)\ndelete ctx->trace_log;\n#endif\n- delete ctx->bsd;\n- delete ctx;\n+ aligned_free<astcenc_context>(ctx);\n}\n}\n@@ -814,14 +805,14 @@ static void compress_image(\nconst astcenc_swizzle& swizzle,\nuint8_t* buffer\n) {\n- const block_size_descriptor *bsd = ctx.bsd;\n+ const block_size_descriptor& bsd = ctx.bsd;\nastcenc_profile decode_mode = ctx.config.profile;\nimage_block blk;\n- int block_x = bsd->xdim;\n- int block_y = bsd->ydim;\n- int block_z = bsd->zdim;\n+ int block_x = bsd.xdim;\n+ int block_y = bsd.ydim;\n+ int block_z = bsd.zdim;\nblk.texel_count = block_x * block_y * block_z;\nint dim_x = image.dim_x;\n@@ -899,7 +890,7 @@ static void compress_image(\n// Fetch the full block for compression\nif (use_full_block)\n{\n- fetch_image_block(decode_mode, image, blk, *bsd, x * block_x, y * block_y, z * block_z, swizzle);\n+ fetch_image_block(decode_mode, image, blk, bsd, x * block_x, y * block_y, z * 
block_z, swizzle);\n}\n// Apply alpha scale RDO - substitute constant color block\nelse\n@@ -1124,13 +1115,13 @@ astcenc_error astcenc_decompress_image(\nphysical_compressed_block pcb = *(const physical_compressed_block*)bp;\nsymbolic_compressed_block scb;\n- physical_to_symbolic(*ctx->bsd, pcb, scb);\n+ physical_to_symbolic(ctx->bsd, pcb, scb);\n- decompress_symbolic_block(ctx->config.profile, *ctx->bsd,\n+ decompress_symbolic_block(ctx->config.profile, ctx->bsd,\nx * block_x, y * block_y, z * block_z,\nscb, blk);\n- write_image_block(image_out, blk, *ctx->bsd,\n+ write_image_block(image_out, blk, ctx->bsd,\nx * block_x, y * block_y, z * block_z, *swizzle);\n}\n@@ -1163,10 +1154,10 @@ astcenc_error astcenc_get_block_info(\n// Decode the compressed data into a symbolic form\nphysical_compressed_block pcb = *(const physical_compressed_block*)data;\nsymbolic_compressed_block scb;\n- physical_to_symbolic(*ctx->bsd, pcb, scb);\n+ physical_to_symbolic(ctx->bsd, pcb, scb);\n// Fetch the appropriate partition and decimation tables\n- block_size_descriptor& bsd = *ctx->bsd;\n+ block_size_descriptor& bsd = ctx->bsd;\n// Start from a clean slate\nmemset(info, 0, sizeof(*info));\n@@ -1199,7 +1190,7 @@ astcenc_error astcenc_get_block_info(\nconst auto& pi = bsd.get_partition_info(partition_count, scb.partition_index);\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\n- const decimation_info& di = *bsd.decimation_tables[bm.decimation_mode];\n+ const decimation_info& di = bsd.get_decimation_info(bm.decimation_mode);\ninfo->weight_x = di.weight_x;\ninfo->weight_y = di.weight_y;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -701,7 +701,7 @@ struct block_size_descriptor\ndecimation_mode decimation_modes[WEIGHTS_MAX_DECIMATION_MODES];\n/** @brief The active decimation tables, stored in low indices. */\n- const decimation_info *decimation_tables[WEIGHTS_MAX_DECIMATION_MODES];\n+ alignas(ASTCENC_VECALIGN) decimation_info decimation_tables[WEIGHTS_MAX_DECIMATION_MODES];\n/** @brief The packed block mode array index, or @c BLOCK_BAD_BLOCK_MODE if not active. */\nuint16_t block_mode_packed_index[WEIGHTS_MAX_BLOCK_MODES];\n@@ -811,7 +811,7 @@ struct block_size_descriptor\n*/\nconst decimation_info& get_decimation_info(unsigned int decimation_mode) const\n{\n- return *this->decimation_tables[decimation_mode];\n+ return this->decimation_tables[decimation_mode];\n}\n/**\n@@ -1375,7 +1375,7 @@ struct astcenc_context\nunsigned int thread_count;\n/** @brief The block size descriptor this context was created with. */\n- block_size_descriptor* bsd;\n+ alignas(ASTCENC_VECALIGN) block_size_descriptor bsd;\n/*\n* Fields below here are not needed in a decompress-only build, but some remain as they are\n@@ -1425,8 +1425,7 @@ struct astcenc_context\n* @brief Populate the block size descriptor for the target block size.\n*\n* This will also initialize the partition table metadata, which is stored as part of the BSD\n- * structure. 
All initialized block size descriptors must be terminated using a call to\n- * @c term_block_size_descriptor() to free resources.\n+ * structure.\n*\n* @param x_texels The number of texels in the block X dimension.\n* @param y_texels The number of texels in the block Y dimension.\n@@ -1445,14 +1444,6 @@ void init_block_size_descriptor(\nfloat mode_cutoff,\nblock_size_descriptor& bsd);\n-/**\n- * @brief Terminate a block size descriptor and free associated resources.\n- *\n- * @param bsd The descriptor to terminate.\n- */\n-void term_block_size_descriptor(\n- block_size_descriptor& bsd);\n-\n/**\n* @brief Populate the partition tables for the target block size.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -521,7 +521,7 @@ void compute_angular_endpoints_1plane(\ncontinue;\n}\n- unsigned int weight_count = bsd.decimation_tables[i]->weight_count;\n+ unsigned int weight_count = bsd.get_decimation_info(i).weight_count;\nif (weight_count < tune_low_weight_limit)\n{\n@@ -584,7 +584,7 @@ void compute_angular_endpoints_2planes(\ncontinue;\n}\n- unsigned int weight_count = bsd.decimation_tables[i]->weight_count;\n+ unsigned int weight_count = bsd.get_decimation_info(i).weight_count;\nif (weight_count < tune_low_weight_limit)\n{\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Move to static resource allocation in context structure
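With the block size descriptor and decimation tables now stored by value inside astcenc_context (and carrying alignas(ASTCENC_VECALIGN) members), the context itself must be allocated with vector alignment, which is why the commit swaps new/delete for the aligned_malloc/aligned_free helpers. The sketch below shows the general pattern using standard C++17 aligned operator new; astcenc's own helpers are platform wrappers and differ in detail, and the 16-byte alignment value here is an assumption. The next row ("Fix mismatched aligned_alloc/delete pairs") addresses exactly the failure mode noted in the comment: releasing such an allocation with plain delete is undefined behavior.

```cpp
#include <cstddef>
#include <new>

constexpr std::size_t VECALIGN = 16;  // assumed SIMD alignment; astcenc uses ASTCENC_VECALIGN

struct alignas(VECALIGN) context
{
    alignas(VECALIGN) float decimation_tables[1024];  // stored inline, no separate heap object
};

// Allocate with extended alignment and construct in place.
template <typename T>
T* aligned_alloc_obj()
{
    void* p = ::operator new(sizeof(T), std::align_val_t(alignof(T)));
    return new (p) T();
}

// Destroy and release through the matching aligned deallocation function.
// Pairing this allocation with a plain `delete` is undefined behavior.
template <typename T>
void aligned_free_obj(T* p)
{
    if (!p) return;
    p->~T();
    ::operator delete(p, std::align_val_t(alignof(T)));
}

int main()
{
    context* ctx = aligned_alloc_obj<context>();
    // ... use ctx ...
    aligned_free_obj(ctx);
}
```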
author: 61,745
date: 13.03.2022 09:02:02
timezone: 0
hash: 7fa17658f90f6766d43cc0f08a9cfd818d7e08db
message: Fix mismatched aligned_alloc/delete pairs
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -714,7 +714,7 @@ astcenc_error astcenc_context_alloc(\nstatus = validate_config(ctx->config);\nif (status != ASTCENC_SUCCESS)\n{\n- delete ctx;\n+ aligned_free<astcenc_context>(ctx);\nreturn status;\n}\n@@ -745,7 +745,7 @@ astcenc_error astcenc_context_alloc(\n\"compression_working_buffers size must be multiple of vector alignment\");\nif (!ctx->working_buffers)\n{\n- delete ctx;\n+ aligned_free<astcenc_context>(ctx);\n*context = nullptr;\nreturn ASTCENC_ERR_OUT_OF_MEM;\n}\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Fix mismatched aligned_alloc/delete pairs
author: 61,745
date: 13.03.2022 15:17:29
timezone: 0
hash: 4c3ef88bf369ff8ad089798b53c32db0036fb8bd
message: Fix vint4::loada() for NEON and loada unit tests
mods:
[ { "change_type": "MODIFY", "old_path": "Source/UnitTest/test_simd.cpp", "new_path": "Source/UnitTest/test_simd.cpp", "diff": "// SPDX-License-Identifier: Apache-2.0\n// ----------------------------------------------------------------------------\n-// Copyright 2020-2021 Arm Limited\n+// Copyright 2020-2022 Arm Limited\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n// use this file except in compliance with the License. You may obtain a copy\n@@ -353,7 +353,7 @@ TEST(vfloat4, Load1)\n/** @brief Test vfloat4 loada. */\nTEST(vfloat4, Loada)\n{\n- vfloat4 a(&(f32_data[0]));\n+ vfloat4 a = vfloat4::loada(&(f32_data[0]));\nEXPECT_EQ(a.lane<0>(), 0.0f);\nEXPECT_EQ(a.lane<1>(), 1.0f);\nEXPECT_EQ(a.lane<2>(), 2.0f);\n@@ -1263,7 +1263,7 @@ TEST(vint4, Load1)\n/** @brief Test vint4 loada. */\nTEST(vint4, Loada)\n{\n- vint4 a(&(s32_data[0]));\n+ vint4 a = vint4::loada(&(s32_data[0]));\nEXPECT_EQ(a.lane<0>(), 0);\nEXPECT_EQ(a.lane<1>(), 1);\nEXPECT_EQ(a.lane<2>(), 2);\n@@ -2023,7 +2023,7 @@ TEST(vfloat8, Load1)\n/** @brief Test vfloat8 loada. */\nTEST(vfloat8, Loada)\n{\n- vfloat8 a(&(f32_data[0]));\n+ vfloat8 a = vfloat8::loada(&(f32_data[0]));\nEXPECT_EQ(a.lane<0>(), 0.0f);\nEXPECT_EQ(a.lane<1>(), 1.0f);\nEXPECT_EQ(a.lane<2>(), 2.0f);\n@@ -2776,7 +2776,7 @@ TEST(vint8, Load1)\n/** @brief Test vint8 loada. */\nTEST(vint8, Loada)\n{\n- vint8 a(&(s32_data[0]));\n+ vint8 a = vint8::loada(&(s32_data[0]));\nEXPECT_EQ(a.lane<0>(), 0);\nEXPECT_EQ(a.lane<1>(), 1);\nEXPECT_EQ(a.lane<2>(), 2);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "// SPDX-License-Identifier: Apache-2.0\n// ----------------------------------------------------------------------------\n-// Copyright 2019-2021 Arm Limited\n+// Copyright 2019-2022 Arm Limited\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n// use this file except in compliance with the License. You may obtain a copy\n@@ -275,7 +275,7 @@ struct vint4\n*/\nstatic ASTCENC_SIMD_INLINE vint4 loada(const int* p)\n{\n- return vint4(*p);\n+ return vint4(p);\n}\n/**\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Fix vint4::loada() for NEON and loada unit tests
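The bug fixed here is that the NEON vint4::loada() built its result from *p, which splats the first integer across all four lanes instead of loading four consecutive values; the unit tests missed it because they constructed vectors through the pointer constructor rather than calling loada(), which the same commit also corrects. A minimal illustration of the two constructors involved is below; the wrapper type is a simplified stand-in, not astcenc's actual vint4.

```cpp
#include <arm_neon.h>
#include <cstdint>

// Simplified 4-lane integer vector, showing only the two constructors that
// the bug confused.
struct vint4
{
    int32x4_t m;

    explicit vint4(int32_t v)        : m(vdupq_n_s32(v)) {}  // splat a single scalar
    explicit vint4(const int32_t* p) : m(vld1q_s32(p))   {}  // load 4 consecutive ints

    static vint4 loada(const int32_t* p)
    {
        // Buggy form: return vint4(*p);   // dereferences p, then splats lane 0
        return vint4(p);                   // fixed: use the pointer overload
    }
};
```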
author: 61,745
date: 14.03.2022 20:25:23
timezone: 0
hash: a6aa4d32572a7e8dad92ae055716978cb359a659
message: Hoist channel_weight setup out of block loop
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -825,15 +825,22 @@ static void compress_image(\nint xblocks = (dim_x + block_x - 1) / block_x;\nint yblocks = (dim_y + block_y - 1) / block_y;\nint zblocks = (dim_z + block_z - 1) / block_z;\n+ int block_count = zblocks * yblocks * xblocks;\nint row_blocks = xblocks;\nint plane_blocks = xblocks * yblocks;\n+ // Populate the block channel weights\n+ blk.channel_weight = vfloat4(ctx.config.cw_r_weight,\n+ ctx.config.cw_g_weight,\n+ ctx.config.cw_b_weight,\n+ ctx.config.cw_a_weight);\n+\n// Use preallocated scratch buffer\nauto& temp_buffers = ctx.working_buffers[thread_index];\n// Only the first thread actually runs the initializer\n- ctx.manage_compress.init(zblocks * yblocks * xblocks);\n+ ctx.manage_compress.init(block_count);\n// All threads run this processing loop until there is no work remaining\nwhile (true)\n@@ -905,12 +912,6 @@ static void compress_image(\nblk.grayscale = true;\n}\n- // Populate the block channel weights\n- blk.channel_weight = vfloat4(ctx.config.cw_r_weight,\n- ctx.config.cw_g_weight,\n- ctx.config.cw_b_weight,\n- ctx.config.cw_a_weight);\n-\nint offset = ((z * yblocks + y) * xblocks + x) * 16;\nuint8_t *bp = buffer + offset;\nphysical_compressed_block* pcb = reinterpret_cast<physical_compressed_block*>(bp);\n" } ]
language: C
license: Apache License 2.0
repo: arm-software/astc-encoder
original_message: Hoist channel_weight setup out of block loop
author: 61,745
date: 14.03.2022 22:20:05
timezone: 0
hash: 9bf7bc8dbd3e4a55226251325e52db49c740c2f9
message: Specialize bilinear_infil functions for single-axis decimation Add a new infill variant for "half decimated" (2 weight linear interpolation) which can be used when only a single axis is decimated.
mods:
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -322,9 +322,11 @@ static void init_decimation_info_2d(\n}\n}\n+ uint8_t max_texel_weight_count = 0;\nfor (unsigned int i = 0; i < texels_per_block; i++)\n{\ndi.texel_weight_count[i] = wb.weight_count_of_texel[i];\n+ max_texel_weight_count = astc::max(max_texel_weight_count, di.texel_weight_count[i]);\nfor (unsigned int j = 0; j < wb.weight_count_of_texel[i]; j++)\n{\n@@ -342,6 +344,8 @@ static void init_decimation_info_2d(\n}\n}\n+ di.max_texel_weight_count = max_texel_weight_count;\n+\nfor (unsigned int i = 0; i < weights_per_block; i++)\n{\nunsigned int texel_count_wt = wb.texel_count_of_weight[i];\n@@ -587,9 +591,11 @@ static void init_decimation_info_3d(\n}\n}\n+ uint8_t max_texel_weight_count = 0;\nfor (unsigned int i = 0; i < texels_per_block; i++)\n{\ndi.texel_weight_count[i] = wb.weight_count_of_texel[i];\n+ max_texel_weight_count = astc::max(max_texel_weight_count, di.texel_weight_count[i]);\n// Init all 4 entries so we can rely on zeros for vectorization\nfor (unsigned int j = 0; j < 4; j++)\n@@ -607,6 +613,8 @@ static void init_decimation_info_3d(\n}\n}\n+ di.max_texel_weight_count = max_texel_weight_count;\n+\nfor (unsigned int i = 0; i < weights_per_block; i++)\n{\nunsigned int texel_count_wt = wb.texel_count_of_weight[i];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -615,10 +615,8 @@ float compute_error_of_weight_set_1plane(\nfloat error_summa = 0.0f;\nunsigned int texel_count = di.texel_count;\n- bool is_decimated = di.texel_count != di.weight_count;\n-\n// Process SIMD-width chunks, safe to over-fetch - the extra space is zero initialized\n- if (is_decimated)\n+ if (di.max_texel_weight_count > 2)\n{\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n{\n@@ -634,6 +632,22 @@ float compute_error_of_weight_set_1plane(\nhaccumulate(error_summav, error);\n}\n}\n+ else if (di.max_texel_weight_count > 1)\n+ {\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ // Compute the bilinear interpolation of the decimated weight grid\n+ vfloat current_values = bilinear_infill_vla_2(di, dec_weight_quant_uvalue, i);\n+\n+ // Compute the error between the computed value and the ideal weight\n+ vfloat actual_values = loada(eai.weights + i);\n+ vfloat diff = current_values - actual_values;\n+ vfloat significance = loada(eai.weight_error_scale + i);\n+ vfloat error = diff * diff * significance;\n+\n+ haccumulate(error_summav, error);\n+ }\n+ }\nelse\n{\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -668,10 +682,9 @@ float compute_error_of_weight_set_2planes(\nvfloat4 error_summav = vfloat4::zero();\nfloat error_summa = 0.0f;\nunsigned int texel_count = di.texel_count;\n- bool is_decimated = di.texel_count != di.weight_count;\n// Process SIMD-width chunks, safe to over-fetch - the extra space is zero initialized\n- if (is_decimated)\n+ if (di.max_texel_weight_count > 2)\n{\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n{\n@@ -696,6 +709,31 @@ float compute_error_of_weight_set_2planes(\nhaccumulate(error_summav, error1 + error2);\n}\n}\n+ else if (di.max_texel_weight_count > 1)\n+ {\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ // Plane 1\n+ // Compute the bilinear interpolation of the decimated weight grid\n+ vfloat 
current_values1 = bilinear_infill_vla_2(di, dec_weight_quant_uvalue_plane1, i);\n+\n+ // Compute the error between the computed value and the ideal weight\n+ vfloat actual_values1 = loada(eai1.weights + i);\n+ vfloat diff = current_values1 - actual_values1;\n+ vfloat error1 = diff * diff * loada(eai1.weight_error_scale + i);\n+\n+ // Plane 2\n+ // Compute the bilinear interpolation of the decimated weight grid\n+ vfloat current_values2 = bilinear_infill_vla_2(di, dec_weight_quant_uvalue_plane2, i);\n+\n+ // Compute the error between the computed value and the ideal weight\n+ vfloat actual_values2 = loada(eai2.weights + i);\n+ diff = current_values2 - actual_values2;\n+ vfloat error2 = diff * diff * loada(eai2.weight_error_scale + i);\n+\n+ haccumulate(error_summav, error1 + error2);\n+ }\n+ }\nelse\n{\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -820,11 +858,22 @@ void compute_ideal_weights_for_decimation(\n// Populate the interpolated weight grid based on the initital average\n// Process SIMD-width texel coordinates at at time while we can. Safe to\n// over-process full SIMD vectors - the tail is zeroed.\n+ if (di.max_texel_weight_count <= 2)\n+ {\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat weight = bilinear_infill_vla_2(di, dec_weight_ideal_value, i);\n+ storea(weight, infilled_weights + i);\n+ }\n+ }\n+ else\n+ {\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n{\nvfloat weight = bilinear_infill_vla(di, dec_weight_ideal_value, i);\nstorea(weight, infilled_weights + i);\n}\n+ }\n// Perform a single iteration of refinement\n// Empirically determined step size; larger values don't help but smaller drops image quality\n@@ -1018,7 +1067,6 @@ void recompute_ideal_colors_1plane(\n) {\nint weight_count = di.weight_count;\nint partition_count = pi.partition_count;\n- bool is_decimated = di.weight_count != di.texel_count;\npromise(weight_count > 0);\npromise(partition_count > 0);\n@@ -1077,11 +1125,15 @@ void recompute_ideal_colors_1plane(\nvfloat4 rgba = blk.texel(tix);\nfloat idx0;\n- if (!is_decimated)\n+ if (di.max_texel_weight_count == 1)\n{\nassert(tix < BLOCK_MAX_WEIGHTS);\nidx0 = dec_weight_quant_uvalue[tix];\n}\n+ else if (di.max_texel_weight_count == 2)\n+ {\n+ idx0 = bilinear_infill_2(di, dec_weight_quant_uvalue, tix);\n+ }\nelse\n{\nidx0 = bilinear_infill(di, dec_weight_quant_uvalue, tix);\n@@ -1218,8 +1270,6 @@ void recompute_ideal_colors_2planes(\nint plane2_component\n) {\nunsigned int weight_count = di.weight_count;\n- bool is_decimated = di.weight_count != di.texel_count;\n-\npromise(weight_count > 0);\nconst quantization_and_transfer_table *qat = &(quant_and_xfer_tables[weight_quant_mode]);\n@@ -1271,11 +1321,15 @@ void recompute_ideal_colors_2planes(\nvfloat4 rgba = blk.texel(j);\nfloat idx0;\n- if (!is_decimated)\n+ if (di.max_texel_weight_count == 1)\n{\nassert(j < BLOCK_MAX_WEIGHTS_2PLANE);\nidx0 = dec_weights_quant_uvalue_plane1[j];\n}\n+ else if (di.max_texel_weight_count == 2)\n+ {\n+ idx0 = bilinear_infill_2(di, dec_weights_quant_uvalue_plane1, j);\n+ }\nelse\n{\nidx0 = bilinear_infill(di, dec_weights_quant_uvalue_plane1, j);\n@@ -1294,11 +1348,15 @@ void recompute_ideal_colors_2planes(\nright1_sum_s += idx0 * idx0;\nfloat idx1;\n- if (!is_decimated)\n+ if (di.max_texel_weight_count == 1)\n{\nassert(j < BLOCK_MAX_WEIGHTS_2PLANE);\nidx1 = dec_weights_quant_uvalue_plane2[j];\n}\n+ else if (di.max_texel_weight_count == 2)\n+ {\n+ idx1 = bilinear_infill_2(di, 
dec_weights_quant_uvalue_plane2, j);\n+ }\nelse\n{\nidx1 = bilinear_infill(di, dec_weights_quant_uvalue_plane2, j);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -546,6 +546,9 @@ struct decimation_info\n/** @brief The total number of texels in the block. */\nuint8_t texel_count;\n+ /** @brief The maximum number of stored weights that contribute to each texel, between 1 and 4. */\n+ uint8_t max_texel_weight_count;\n+\n/** @brief The total number of weights stored. */\nuint8_t weight_count;\n@@ -1903,6 +1906,28 @@ static inline float bilinear_infill(\nweights[di.texel_weights_4t[3][index]] * di.texel_weights_float_4t[3][index]);\n}\n+/**\n+ * @brief Compute the infilled weight for a texel index in a decimated grid.\n+ *\n+ * This is specialized version which computes only two weights per texel for\n+ * encodings that are only decimated in a single axis.\n+ *\n+ * @param di The weight grid decimation to use.\n+ * @param weights The decimated weight values to use.\n+ * @param index The texel index to interpolate.\n+ *\n+ * @return The interpolated weight for the given texel.\n+ */\n+static inline float bilinear_infill_2(\n+ const decimation_info& di,\n+ const float* weights,\n+ unsigned int index\n+) {\n+ return (weights[di.texel_weights_4t[0][index]] * di.texel_weights_float_4t[0][index] +\n+ weights[di.texel_weights_4t[1][index]] * di.texel_weights_float_4t[1][index]);\n+}\n+\n+\n/**\n* @brief Compute the infilled weight for N texel indices in a decimated grid.\n*\n@@ -1940,6 +1965,39 @@ static inline vfloat bilinear_infill_vla(\n(weight_val2 * tex_weight_float2 + weight_val3 * tex_weight_float3);\n}\n+/**\n+ * @brief Compute the infilled weight for N texel indices in a decimated grid.\n+ *\n+ * This is specialized version which computes only two weights per texel for\n+ * encodings that are only decimated in a single axis.\n+ *\n+ * @param di The weight grid decimation to use.\n+ * @param weights The decimated weight values to use.\n+ * @param index The first texel index to interpolate.\n+ *\n+ * @return The interpolated weight for the given set of SIMD_WIDTH texels.\n+ */\n+static inline vfloat bilinear_infill_vla_2(\n+ const decimation_info& di,\n+ const float* weights,\n+ unsigned int index\n+) {\n+ // Load the bilinear filter texel weight indexes in the decimated grid\n+ vint weight_idx0 = vint(di.texel_weights_4t[0] + index);\n+ vint weight_idx1 = vint(di.texel_weights_4t[1] + index);\n+\n+ // Load the bilinear filter weights from the decimated grid\n+ vfloat weight_val0 = gatherf(weights, weight_idx0);\n+ vfloat weight_val1 = gatherf(weights, weight_idx1);\n+\n+ // Load the weight contribution factors for each decimated weight\n+ vfloat tex_weight_float0 = loada(di.texel_weights_float_4t[0] + index);\n+ vfloat tex_weight_float1 = loada(di.texel_weights_float_4t[1] + index);\n+\n+ // Compute the bilinear interpolation to generate the per-texel weight\n+ return (weight_val0 * tex_weight_float0 + weight_val1 * tex_weight_float1);\n+}\n+\n/**\n* @brief Compute the error of a decimated weight set for 1 plane.\n*\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Specialize bilinear_infill functions for single-axis decimation (#317) Add a new infill variant for "half decimated" (2 weight linear interpolation) which can be used when only a single axis is decimated.
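A minimal scalar sketch of the two-tap infill idea described in the commit above, assuming hypothetical per-texel index and factor tables; the real implementation reads the decimation_info structure and also has SIMD (vla) variants.

```cpp
#include <cstdint>

/*
 * When the weight grid is decimated along only one axis, each texel is
 * influenced by at most two stored weights, so the usual 4-tap bilinear
 * blend collapses to a 2-tap linear interpolation. Table names here are
 * illustrative stand-ins, not the astcenc field names.
 */
static float infill_two_tap(
    const float* stored_weights,   // decimated weight grid values
    const uint8_t* tap_index0,     // per-texel index of the first weight
    const uint8_t* tap_index1,     // per-texel index of the second weight
    const float* tap_factor0,      // per-texel contribution of weight 0
    const float* tap_factor1,      // per-texel contribution of weight 1
    unsigned int texel)
{
    return stored_weights[tap_index0[texel]] * tap_factor0[texel]
         + stored_weights[tap_index1[texel]] * tap_factor1[texel];
}
```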
61,745
17.03.2022 21:30:01
0
0b730c1fe43437b62f06868d571dab8ca13d0f5c
Mark duplicate partitions as invalid in the count min mask
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -360,9 +360,11 @@ static void count_partition_mismatch_bits(\nconst uint64_t bitmaps[BLOCK_MAX_PARTITIONS],\nunsigned int mismatch_counts[BLOCK_MAX_PARTITIONINGS]\n) {\n+ unsigned int active_count = bsd.partitioning_count[partition_count - 1];\n+\nif (partition_count == 2)\n{\n- for (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n+ for (unsigned int i = 0; i < active_count; i++)\n{\nint bitcount = partition_mismatch2(bitmaps, bsd.coverage_bitmaps_2[i]);\nmismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_2[i]));\n@@ -370,7 +372,7 @@ static void count_partition_mismatch_bits(\n}\nelse if (partition_count == 3)\n{\n- for (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n+ for (unsigned int i = 0; i < active_count; i++)\n{\nint bitcount = partition_mismatch3(bitmaps, bsd.coverage_bitmaps_3[i]);\nmismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_3[i]));\n@@ -378,7 +380,7 @@ static void count_partition_mismatch_bits(\n}\nelse\n{\n- for (unsigned int i = 0; i < bsd.partitioning_count[partition_count - 1]; i++)\n+ for (unsigned int i = 0; i < active_count; i++)\n{\nint bitcount = partition_mismatch4(bitmaps, bsd.coverage_bitmaps_4[i]);\nmismatch_counts[i] = astc::max(bitcount, static_cast<int>(bsd.partitioning_valid_4[i]));\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -374,6 +374,12 @@ static void build_partition_table_for_one_partition_count(\npartition_info* ptab,\nuint64_t* canonical_patterns\n) {\n+ uint8_t* partitioning_valid[3] {\n+ bsd.partitioning_valid_2,\n+ bsd.partitioning_valid_3,\n+ bsd.partitioning_valid_4\n+ };\n+\nunsigned int next_index = 0;\nbsd.partitioning_count[partition_count - 1] = 0;\n@@ -399,6 +405,7 @@ static void build_partition_table_for_one_partition_count(\nif (match)\n{\nptab[next_index].partition_count = 0;\n+ partitioning_valid[partition_count - 2][i] = 255;\nkeep = !can_omit_partitionings;\nbreak;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Mark duplicate partitions as invalid in the count min mask
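A hedged sketch of the sentinel scheme used in the diff above: duplicate partitionings get 255 written into a per-entry valid array, and folding that value into the mismatch score with max() pushes duplicates to the ceiling so the later lowest-score search never picks them. Names and the calling convention are simplified assumptions.

```cpp
#include <algorithm>
#include <cstdint>

// Duplicate partitionings are stamped with 255 in the "valid" array; clean
// entries hold 0. Taking max(bitcount, valid[i]) disqualifies duplicates
// without adding a per-entry branch to the hot loop.
static void score_partitionings(
    const int* mismatch_bits,       // raw mismatch bit counts per partitioning
    const uint8_t* valid_sentinel,  // 0 = usable, 255 = duplicate/invalid
    unsigned int count,
    unsigned int* scores_out)
{
    for (unsigned int i = 0; i < count; i++)
    {
        scores_out[i] = static_cast<unsigned int>(
            std::max(mismatch_bits[i], static_cast<int>(valid_sentinel[i])));
    }
}
```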
61,745
17.03.2022 21:37:41
0
9f54c5cf2900fbede50030f23e71b0bd1b374379
Make index style consistent for valid array
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -405,7 +405,7 @@ static void build_partition_table_for_one_partition_count(\nif (match)\n{\nptab[next_index].partition_count = 0;\n- partitioning_valid[partition_count - 2][i] = 255;\n+ partitioning_valid[partition_count - 2][next_index] = 255;\nkeep = !can_omit_partitionings;\nbreak;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Make index style consistent for valid array
61,745
21.03.2022 20:01:47
0
8715c151ad25cc8e852c7b69c32b084db2eb0119
Keep dot() vector form in realign_weights_decimated loop
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -280,9 +280,9 @@ static bool realign_weights_decimated(\nfloat uqw_next_dif = static_cast<float>(next_wt_uq) - uqwf;\nfloat uqw_prev_dif = static_cast<float>(prev_wt_uq) - uqwf;\n- float current_error = 0.0f;\n- float up_error = 0.0f;\n- float down_error = 0.0f;\n+ vfloat4 current_errorv = vfloat4::zero();\n+ vfloat4 up_errorv = vfloat4::zero();\n+ vfloat4 down_errorv = vfloat4::zero();\n// Interpolate the colors to create the diffs\nunsigned int texels_to_evaluate = di.weight_texel_count[we_idx];\n@@ -323,11 +323,15 @@ static bool realign_weights_decimated(\nvfloat4 color_diff = color - orig_color;\nvfloat4 color_up_diff = color_diff + color_offset * plane_up_weight;\nvfloat4 color_down_diff = color_diff + color_offset * plane_down_weight;\n- current_error += dot_s(color_diff * color_diff, error_weight);\n- up_error += dot_s(color_up_diff * color_up_diff, error_weight);\n- down_error += dot_s(color_down_diff * color_down_diff, error_weight);\n+ current_errorv += dot(color_diff * color_diff, error_weight);\n+ up_errorv += dot(color_up_diff * color_up_diff, error_weight);\n+ down_errorv += dot(color_down_diff * color_down_diff, error_weight);\n}\n+ float current_error = current_errorv.lane<0>();\n+ float up_error = up_errorv.lane<0>();\n+ float down_error = down_errorv.lane<0>();\n+\n// Check if the prev or next error is better, and if so use it\nif ((up_error < current_error) && (up_error < down_error))\n{\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Keep dot() vector form in realign_weights_decimated loop
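A scalar-struct sketch of the pattern applied in the diff above: the weighted squared-error accumulation stays in four-lane form inside the loop and is reduced to a scalar only once after the loop, avoiding a vector-to-scalar transition per iteration. The vec4 type below is a stand-in assumption, not astcenc's vfloat4.

```cpp
// Stand-in 4-lane type; the real code uses astcenc's vfloat4 and its dot()
// helper, which returns the dot product broadcast across all lanes.
struct vec4 { float v[4]; };

static inline vec4 vmul(vec4 a, vec4 b)
{
    vec4 r;
    for (int i = 0; i < 4; i++) r.v[i] = a.v[i] * b.v[i];
    return r;
}

static inline vec4 vadd(vec4 a, vec4 b)
{
    vec4 r;
    for (int i = 0; i < 4; i++) r.v[i] = a.v[i] + b.v[i];
    return r;
}

// Accumulate weighted squared error per lane; do the horizontal reduction
// once after the loop instead of once per iteration.
static float accumulate_error(const vec4* diffs, const vec4* weights, int n)
{
    vec4 acc = {{0.0f, 0.0f, 0.0f, 0.0f}};
    for (int i = 0; i < n; i++)
    {
        acc = vadd(acc, vmul(vmul(diffs[i], diffs[i]), weights[i]));
    }
    return acc.v[0] + acc.v[1] + acc.v[2] + acc.v[3];
}
```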
61,745
21.03.2022 21:03:01
0
956812297a6e2947c81da6736b198c2f69cf1498
Vectorize resolve step in avgs_and_dirs
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -41,9 +41,7 @@ void compute_avgs_and_dirs_4_comp(\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- // TODO: Try gathers?\nvfloat4 base_sum = vfloat4::zero();\n-\nfor (unsigned int i = 0; i < texel_count; i++)\n{\nint iwt = texel_indexes[i];\n@@ -62,47 +60,46 @@ void compute_avgs_and_dirs_4_comp(\n{\nunsigned int iwt = texel_indexes[i];\nvfloat4 texel_datum = blk.texel(iwt);\n- texel_datum = (texel_datum - average) * texel_weight;\n+ texel_datum = texel_datum - average;\nvfloat4 zero = vfloat4::zero();\n- vmask4 tdm0 = vfloat4(texel_datum.lane<0>()) > zero;\n+ vmask4 tdm0 = texel_datum.swz<0,0,0,0>() > zero;\nsum_xp += select(zero, texel_datum, tdm0);\n- vmask4 tdm1 = vfloat4(texel_datum.lane<1>()) > zero;\n+ vmask4 tdm1 = texel_datum.swz<1,1,1,1>() > zero;\nsum_yp += select(zero, texel_datum, tdm1);\n- vmask4 tdm2 = vfloat4(texel_datum.lane<2>()) > zero;\n+ vmask4 tdm2 = texel_datum.swz<2,2,2,2>() > zero;\nsum_zp += select(zero, texel_datum, tdm2);\n- vmask4 tdm3 = vfloat4(texel_datum.lane<3>()) > zero;\n+ vmask4 tdm3 = texel_datum.swz<3,3,3,3>() > zero;\nsum_wp += select(zero, texel_datum, tdm3);\n}\n- float prod_xp = dot_s(sum_xp, sum_xp);\n- float prod_yp = dot_s(sum_yp, sum_yp);\n- float prod_zp = dot_s(sum_zp, sum_zp);\n- float prod_wp = dot_s(sum_wp, sum_wp);\n+ sum_xp = sum_xp * texel_weight;\n+ sum_yp = sum_yp * texel_weight;\n+ sum_zp = sum_zp * texel_weight;\n+ sum_wp = sum_wp * texel_weight;\n+\n+ vfloat4 prod_xp = dot(sum_xp, sum_xp);\n+ vfloat4 prod_yp = dot(sum_yp, sum_yp);\n+ vfloat4 prod_zp = dot(sum_zp, sum_zp);\n+ vfloat4 prod_wp = dot(sum_wp, sum_wp);\nvfloat4 best_vector = sum_xp;\n- float best_sum = prod_xp;\n+ vfloat4 best_sum = prod_xp;\n- if (prod_yp > best_sum)\n- {\n- best_vector = sum_yp;\n- best_sum = prod_yp;\n- }\n+ vmask4 mask = prod_yp > best_sum;\n+ best_vector = select(best_vector, sum_yp, mask);\n+ best_sum = select(best_sum, prod_yp, mask);\n- if (prod_zp > best_sum)\n- {\n- best_vector = sum_zp;\n- best_sum = prod_zp;\n- }\n+ mask = prod_zp > best_sum;\n+ best_vector = select(best_vector, sum_zp, mask);\n+ best_sum = select(best_sum, prod_zp, mask);\n- if (prod_wp > best_sum)\n- {\n- best_vector = sum_wp;\n- }\n+ mask = prod_wp > best_sum;\n+ best_vector = select(best_vector, sum_wp, mask);\npm[partition].dir = best_vector;\n}\n@@ -173,38 +170,37 @@ void compute_avgs_and_dirs_3_comp(\nvfloat4 texel_datum = vfloat3(data_vr[iwt],\ndata_vg[iwt],\ndata_vb[iwt]);\n-\n- texel_datum = (texel_datum - average) * texel_weight;\n+ texel_datum = texel_datum - average;\nvfloat4 zero = vfloat4::zero();\n- vmask4 tdm0 = vfloat4(texel_datum.lane<0>()) > zero;\n+ vmask4 tdm0 = texel_datum.swz<0,0,0,0>() > zero;\nsum_xp += select(zero, texel_datum, tdm0);\n- vmask4 tdm1 = vfloat4(texel_datum.lane<1>()) > zero;\n+ vmask4 tdm1 = texel_datum.swz<1,1,1,1>() > zero;\nsum_yp += select(zero, texel_datum, tdm1);\n- vmask4 tdm2 = vfloat4(texel_datum.lane<2>()) > zero;\n+ vmask4 tdm2 = texel_datum.swz<2,2,2,2>() > zero;\nsum_zp += select(zero, texel_datum, tdm2);\n}\n- float prod_xp = dot3_s(sum_xp, sum_xp);\n- float prod_yp = dot3_s(sum_yp, sum_yp);\n- float prod_zp = dot3_s(sum_zp, sum_zp);\n+ sum_xp = sum_xp * texel_weight;\n+ sum_yp = sum_yp * texel_weight;\n+ sum_zp = sum_zp * texel_weight;\n+\n+ vfloat4 prod_xp = dot(sum_xp, sum_xp);\n+ vfloat4 prod_yp = dot(sum_yp, 
sum_yp);\n+ vfloat4 prod_zp = dot(sum_zp, sum_zp);\nvfloat4 best_vector = sum_xp;\n- float best_sum = prod_xp;\n+ vfloat4 best_sum = prod_xp;\n- if (prod_yp > best_sum)\n- {\n- best_vector = sum_yp;\n- best_sum = prod_yp;\n- }\n+ vmask4 mask = prod_yp > best_sum;\n+ best_vector = select(best_vector, sum_yp, mask);\n+ best_sum = select(best_sum, prod_yp, mask);\n- if (prod_zp > best_sum)\n- {\n- best_vector = sum_zp;\n- }\n+ mask = prod_zp > best_sum;\n+ best_vector = select(best_vector, sum_zp, mask);\npm[partition].dir = best_vector;\n}\n@@ -246,38 +242,37 @@ void compute_avgs_and_dirs_3_comp_rgb(\nunsigned int iwt = texel_indexes[i];\nvfloat4 texel_datum = blk.texel3(iwt);\n-\n- texel_datum = (texel_datum - average) * texel_weight;\n+ texel_datum = texel_datum - average;\nvfloat4 zero = vfloat4::zero();\n- vmask4 tdm0 = vfloat4(texel_datum.lane<0>()) > zero;\n+ vmask4 tdm0 = texel_datum.swz<0,0,0,0>() > zero;\nsum_xp += select(zero, texel_datum, tdm0);\n- vmask4 tdm1 = vfloat4(texel_datum.lane<1>()) > zero;\n+ vmask4 tdm1 = texel_datum.swz<1,1,1,1>() > zero;\nsum_yp += select(zero, texel_datum, tdm1);\n- vmask4 tdm2 = vfloat4(texel_datum.lane<2>()) > zero;\n+ vmask4 tdm2 = texel_datum.swz<2,2,2,2>() > zero;\nsum_zp += select(zero, texel_datum, tdm2);\n}\n- float prod_xp = dot3_s(sum_xp, sum_xp);\n- float prod_yp = dot3_s(sum_yp, sum_yp);\n- float prod_zp = dot3_s(sum_zp, sum_zp);\n+ sum_xp = sum_xp * texel_weight;\n+ sum_yp = sum_yp * texel_weight;\n+ sum_zp = sum_zp * texel_weight;\n+\n+ vfloat4 prod_xp = dot(sum_xp, sum_xp);\n+ vfloat4 prod_yp = dot(sum_yp, sum_yp);\n+ vfloat4 prod_zp = dot(sum_zp, sum_zp);\nvfloat4 best_vector = sum_xp;\n- float best_sum = prod_xp;\n+ vfloat4 best_sum = prod_xp;\n- if (prod_yp > best_sum)\n- {\n- best_vector = sum_yp;\n- best_sum = prod_yp;\n- }\n+ vmask4 mask = prod_yp > best_sum;\n+ best_vector = select(best_vector, sum_yp, mask);\n+ best_sum = select(best_sum, prod_yp, mask);\n- if (prod_zp > best_sum)\n- {\n- best_vector = sum_zp;\n- }\n+ mask = prod_zp > best_sum;\n+ best_vector = select(best_vector, sum_zp, mask);\npm[partition].dir = best_vector;\n}\n@@ -346,27 +341,28 @@ void compute_avgs_and_dirs_2_comp(\n{\nunsigned int iwt = texel_indexes[i];\nvfloat4 texel_datum = vfloat2(data_vr[iwt], data_vg[iwt]);\n- texel_datum = (texel_datum - average) * texel_weight;\n+ texel_datum = texel_datum - average;\nvfloat4 zero = vfloat4::zero();\n- vmask4 tdm0 = vfloat4(texel_datum.lane<0>()) > zero;\n+ vmask4 tdm0 = texel_datum.swz<0,0,0,0>() > zero;\nsum_xp += select(zero, texel_datum, tdm0);\n- vmask4 tdm1 = vfloat4(texel_datum.lane<1>()) > zero;\n+ vmask4 tdm1 = texel_datum.swz<1,1,1,1>() > zero;\nsum_yp += select(zero, texel_datum, tdm1);\n}\n- float prod_xp = dot_s(sum_xp, sum_xp);\n- float prod_yp = dot_s(sum_yp, sum_yp);\n+ sum_xp = sum_xp * texel_weight;\n+ sum_yp = sum_yp * texel_weight;\n+\n+ vfloat4 prod_xp = dot(sum_xp, sum_xp);\n+ vfloat4 prod_yp = dot(sum_yp, sum_yp);\nvfloat4 best_vector = sum_xp;\n- float best_sum = prod_xp;\n+ vfloat4 best_sum = prod_xp;\n- if (prod_yp > best_sum)\n- {\n- best_vector = sum_yp;\n- }\n+ vmask4 mask = prod_yp > best_sum;\n+ best_vector = select(best_vector, sum_yp, mask);\npm[partition].dir = best_vector;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Vectorize resolve step in avgs_and_dirs
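A simplified sketch of the mask-select resolve shown in the diff above: each candidate direction conditionally replaces the running best via a per-element blend rather than an if/else chain. The blend is written with ternaries over plain floats here; the real code keeps prod_* and best_sum as vfloat4 values and uses select() with a comparison mask. Types and names are illustrative.

```cpp
// Pick the direction whose self-dot-product is largest, using per-element
// blends (which compilers typically lower to cmov/blend instructions)
// instead of a data-dependent branch chain.
struct dir4 { float d[4]; };

static void pick_best_direction(
    const dir4* dirs,        // candidate direction vectors
    const float* scores,     // dot(dir, dir) for each candidate
    int count,
    dir4* best_out)
{
    dir4 best = dirs[0];
    float best_score = scores[0];

    for (int i = 1; i < count; i++)
    {
        bool take = scores[i] > best_score;        // the mask in the SIMD form
        best_score = take ? scores[i] : best_score;
        for (int k = 0; k < 4; k++)
        {
            best.d[k] = take ? dirs[i].d[k] : best.d[k];
        }
    }

    *best_out = best;
}
```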
61,745
21.03.2022 21:29:57
0
430c8b9e783ba8de537a34c510fc7e20c9a86384
Vectorize resolve in compute_symbolic_block_diff
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -351,7 +351,8 @@ float compute_symbolic_block_difference_2plane(\nvmask4 plane2_mask = vint4::lane_id() == vint4(scb.plane2_component);\n- float summa = 0.0f;\n+ vfloat4 summa = vfloat4::zero();\n+\n// Decode the color endpoints for this partition\nvint4 ep0;\nvint4 ep1;\n@@ -409,11 +410,10 @@ float compute_symbolic_block_difference_2plane(\nerror = min(abs(error), 1e15f);\nerror = error * error;\n- float metric = dot_s(error, blk.channel_weight);\n- summa += astc::min(metric, ERROR_CALC_DEFAULT);\n+ summa += min(dot(error, blk.channel_weight), ERROR_CALC_DEFAULT);\n}\n- return summa;\n+ return summa.lane<0>();\n}\n/* See header for documentation. */\n@@ -445,7 +445,7 @@ float compute_symbolic_block_difference_1plane(\nint plane1_weights[BLOCK_MAX_TEXELS];\nunpack_weights(bsd, scb, di, false, bm.get_weight_quant_mode(), plane1_weights, nullptr);\n- float summa = 0.0f;\n+ vfloat4 summa = vfloat4::zero();\nfor (unsigned int i = 0; i < partition_count; i++)\n{\n// Decode the color endpoints for this partition\n@@ -506,12 +506,11 @@ float compute_symbolic_block_difference_1plane(\nerror = min(abs(error), 1e15f);\nerror = error * error;\n- float metric = dot_s(error, blk.channel_weight);\n- summa += astc::min(metric, ERROR_CALC_DEFAULT);\n+ summa += min(dot(error, blk.channel_weight), ERROR_CALC_DEFAULT);\n}\n}\n- return summa;\n+ return summa.lane<0>();\n}\n/* See header for documentation. */\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Vectorize resolve in compute_symbolic_block_diff
61,745
22.03.2022 00:19:55
0
a06b905c08e2fc2323beccf3c35fbc7e646cf615
Enable more block_modes for 4x4 -fastest
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -62,7 +62,7 @@ struct astcenc_preset_config\nstatic const std::array<astcenc_preset_config, 5> preset_configs_high {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 8, 40, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n+ 2, 8, 42, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n}, {\nASTCENC_PRE_FAST,\n3, 12, 55, 3, 3, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.1f, 0.65f, 20\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Enable more block_modes for 4x4 -fastest
61,745
01.04.2022 18:17:52
-3,600
f7111fa09b4333a17e39b5384d72eb6a38233aee
Remove unused deblock_weight array
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1396,9 +1396,6 @@ struct astcenc_context\n/** @brief The pixel region and variance worker arguments. */\navg_args avg_preprocess_args;\n- /** @brief The per-texel deblocking weights for the current block size. */\n- float deblock_weights[BLOCK_MAX_TEXELS];\n-\n/** @brief The parallel manager for averages computation. */\nParallelManager manage_avg;\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove unused deblock_weight array
61,745
01.04.2022 19:25:23
-3,600
139a54513e4000a8e8f6abd562fabbe444a8c06e
Remove unused parameter from compute_eci
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -507,7 +507,7 @@ static float compress_symbolic_block_for_partition_1plane(\nquant_method color_quant_level_mod[TUNE_MAX_TRIAL_CANDIDATES];\nunsigned int candidate_count = compute_ideal_endpoint_formats(\n- bsd, pi, blk, ei.ep, qwt_bitcounts, qwt_errors,\n+ pi, blk, ei.ep, qwt_bitcounts, qwt_errors,\nconfig.tune_candidate_limit, max_block_modes,\npartition_format_specifiers, block_mode_index,\ncolor_quant_level, color_quant_level_mod, tmpbuf);\n@@ -886,7 +886,7 @@ static float compress_symbolic_block_for_partition_2planes(\nconst auto& pi = bsd.get_partition_info(1, 0);\nunsigned int candidate_count = compute_ideal_endpoint_formats(\n- bsd, pi, blk, epm, qwt_bitcounts, qwt_errors,\n+ pi, blk, epm, qwt_bitcounts, qwt_errors,\nconfig.tune_candidate_limit, bsd.block_mode_count,\npartition_format_specifiers, block_mode_index,\ncolor_quant_level, color_quant_level_mod, tmpbuf);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -2110,7 +2110,6 @@ void unpack_weights(\n* combination for each. The modified quantization level can be used when all formats are the same,\n* as this frees up two additional bits of storage.\n*\n- * @param bsd The block size information.\n* @param pi The partition info for the current trial.\n* @param blk The image block color data to compress.\n* @param ep The ideal endpoints.\n@@ -2127,7 +2126,6 @@ void unpack_weights(\n* @return The actual number of candidate matches returned.\n*/\nunsigned int compute_ideal_endpoint_formats(\n- const block_size_descriptor& bsd,\nconst partition_info& pi,\nconst image_block& blk,\nconst endpoints& ep,\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -218,24 +218,19 @@ static void compute_error_squared_rgb_single_partition(\n* RGB-lumashift encoding (HDR only), luminance-encoding, and alpha drop. Also determines whether\n* the endpoints are eligible for offset encoding or blue-contraction\n*\n- * @param bsd The block size information.\n* @param blk The image block.\n* @param pi The partition info data.\n* @param ep The idealized endpoints.\n* @param[out] eci The resulting encoding choice error metrics.\n*/\nstatic void compute_encoding_choice_errors(\n- const block_size_descriptor& bsd,\nconst image_block& blk,\nconst partition_info& pi,\nconst endpoints& ep,\nencoding_choice_errors eci[BLOCK_MAX_PARTITIONS])\n{\nint partition_count = pi.partition_count;\n- int texels_per_block = bsd.texel_count;\n-\npromise(partition_count > 0);\n- promise(texels_per_block > 0);\npartition_metrics pms[BLOCK_MAX_PARTITIONS];\n@@ -1113,7 +1108,6 @@ static float four_partitions_find_best_combination_for_bitcount(\n/* See header for documentation. 
*/\nunsigned int compute_ideal_endpoint_formats(\n- const block_size_descriptor& bsd,\nconst partition_info& pi,\nconst image_block& blk,\nconst endpoints& ep,\n@@ -1140,7 +1134,7 @@ unsigned int compute_ideal_endpoint_formats(\n// Compute the errors that result from various encoding choices (such as using luminance instead\n// of RGB, discarding Alpha, using RGB-scale in place of two separate RGB endpoints and so on)\nencoding_choice_errors eci[BLOCK_MAX_PARTITIONS];\n- compute_encoding_choice_errors(bsd, blk, pi, ep, eci);\n+ compute_encoding_choice_errors(blk, pi, ep, eci);\nfloat best_error[BLOCK_MAX_PARTITIONS][21][4];\nint format_of_choice[BLOCK_MAX_PARTITIONS][21][4];\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove unused parameter from compute_eci
61,745
03.04.2022 09:55:04
-3,600
0b5b2e62ab803618b8c5b1f2b80181a6d732bff3
Update main test reference
[ { "change_type": "ADD", "old_path": null, "new_path": "Test/Images/Small/astc_reference-main-avx2_fast_results.csv", "diff": "+Image Set,Block Size,Name,PSNR,Total Time,Coding Time,Coding Rate\n+Small,4x4,hdr-rgb-00.hdr,33.6884,0.1063,0.0155,4.2342\n+Small,4x4,ldr-rgb-00.png,38.1103,0.0148,0.0098,6.6853\n+Small,4x4,ldr-rgb-01.png,39.8105,0.0131,0.0085,7.7311\n+Small,4x4,ldr-rgb-02.png,35.0374,0.0182,0.0135,4.8707\n+Small,4x4,ldr-rgb-03.png,45.9704,0.0079,0.0033,19.7996\n+Small,4x4,ldr-rgb-04.png,41.9106,0.0109,0.0059,11.0686\n+Small,4x4,ldr-rgb-05.png,37.2317,0.0176,0.0128,5.1240\n+Small,4x4,ldr-rgb-06.png,35.2059,0.0173,0.0123,5.3220\n+Small,4x4,ldr-rgb-07.png,38.0309,0.0186,0.0134,4.8816\n+Small,4x4,ldr-rgb-08.png,43.6358,0.0100,0.0052,12.4973\n+Small,4x4,ldr-rgb-09.png,41.8532,0.0110,0.0063,10.4042\n+Small,4x4,ldr-rgb-10.png,44.3464,0.0045,0.0019,8.5335\n+Small,4x4,ldr-rgba-00.png,35.5182,0.0178,0.0126,5.2005\n+Small,4x4,ldr-rgba-01.png,38.7956,0.0128,0.0077,8.5345\n+Small,4x4,ldr-rgba-02.png,34.8333,0.0161,0.0111,5.9084\n+Small,4x4,ldr-xy-00.png,37.5698,0.0134,0.0061,10.6877\n+Small,4x4,ldr-xy-01.png,44.1378,0.0141,0.0071,9.2514\n+Small,4x4,ldr-xy-02.png,48.1786,0.0121,0.0046,14.1946\n+Small,4x4,ldrs-rgba-00.png,35.5254,0.0182,0.0129,5.0811\n+Small,4x4,ldrs-rgba-01.png,38.8141,0.0131,0.0080,8.1746\n+Small,4x4,ldrs-rgba-02.png,34.8386,0.0162,0.0112,5.8629\n+Small,5x5,hdr-rgb-00.hdr,29.4686,0.1133,0.0199,3.2913\n+Small,5x5,ldr-rgb-00.png,34.3042,0.0172,0.0113,5.8249\n+Small,5x5,ldr-rgb-01.png,36.1808,0.0141,0.0084,7.8469\n+Small,5x5,ldr-rgb-02.png,31.0160,0.0190,0.0134,4.8980\n+Small,5x5,ldr-rgb-03.png,42.6231,0.0088,0.0032,20.4294\n+Small,5x5,ldr-rgb-04.png,37.2434,0.0126,0.0066,9.8966\n+Small,5x5,ldr-rgb-05.png,33.1823,0.0214,0.0155,4.2148\n+Small,5x5,ldr-rgb-06.png,31.0247,0.0191,0.0131,5.0051\n+Small,5x5,ldr-rgb-07.png,35.1883,0.0192,0.0129,5.0673\n+Small,5x5,ldr-rgb-08.png,40.0310,0.0107,0.0049,13.2849\n+Small,5x5,ldr-rgb-09.png,37.2810,0.0127,0.0069,9.5216\n+Small,5x5,ldr-rgb-10.png,40.1234,0.0052,0.0018,9.2677\n+Small,5x5,ldr-rgba-00.png,32.0072,0.0218,0.0154,4.2432\n+Small,5x5,ldr-rgba-01.png,35.1428,0.0147,0.0088,7.4837\n+Small,5x5,ldr-rgba-02.png,31.1062,0.0194,0.0135,4.8704\n+Small,5x5,ldr-xy-00.png,36.7259,0.0143,0.0051,12.7453\n+Small,5x5,ldr-xy-01.png,39.8637,0.0150,0.0065,10.1513\n+Small,5x5,ldr-xy-02.png,43.9148,0.0135,0.0046,14.1028\n+Small,5x5,ldrs-rgba-00.png,32.0096,0.0219,0.0156,4.2042\n+Small,5x5,ldrs-rgba-01.png,35.1497,0.0148,0.0087,7.5295\n+Small,5x5,ldrs-rgba-02.png,31.1070,0.0195,0.0135,4.8672\n+Small,6x6,hdr-rgb-00.hdr,26.9904,0.1170,0.0232,2.8288\n+Small,6x6,ldr-rgb-00.png,31.5938,0.0213,0.0148,4.4171\n+Small,6x6,ldr-rgb-01.png,32.8896,0.0172,0.0108,6.0446\n+Small,6x6,ldr-rgb-02.png,27.4079,0.0232,0.0170,3.8507\n+Small,6x6,ldr-rgb-03.png,40.4899,0.0096,0.0035,18.6928\n+Small,6x6,ldr-rgb-04.png,33.9065,0.0154,0.0088,7.4295\n+Small,6x6,ldr-rgb-05.png,29.9329,0.0262,0.0197,3.3227\n+Small,6x6,ldr-rgb-06.png,27.4651,0.0228,0.0163,4.0211\n+Small,6x6,ldr-rgb-07.png,32.8824,0.0205,0.0137,4.7722\n+Small,6x6,ldr-rgb-08.png,37.6380,0.0114,0.0050,13.0888\n+Small,6x6,ldr-rgb-09.png,33.4716,0.0154,0.0090,7.3119\n+Small,6x6,ldr-rgb-10.png,36.6590,0.0064,0.0024,6.8047\n+Small,6x6,ldr-rgba-00.png,29.4788,0.0262,0.0192,3.4100\n+Small,6x6,ldr-rgba-01.png,32.0575,0.0175,0.0109,5.9861\n+Small,6x6,ldr-rgba-02.png,27.8275,0.0240,0.0174,3.7649\n+Small,6x6,ldr-xy-00.png,35.6827,0.0152,0.0058,11.3049\n+Small,6x6,ldr-xy-01.png,37.1053,0.0175,0.0087,7.5268\n+Small,6x6,ldr-xy-02.png,41.
9441,0.0139,0.0047,13.8883\n+Small,6x6,ldrs-rgba-00.png,29.4790,0.0264,0.0195,3.3655\n+Small,6x6,ldrs-rgba-01.png,32.0640,0.0175,0.0108,6.0738\n+Small,6x6,ldrs-rgba-02.png,27.8273,0.0238,0.0173,3.7935\n+Small,8x8,hdr-rgb-00.hdr,23.7216,0.1290,0.0308,2.1281\n+Small,8x8,ldr-rgb-00.png,27.9814,0.0289,0.0184,3.5650\n+Small,8x8,ldr-rgb-01.png,28.7458,0.0245,0.0141,4.6506\n+Small,8x8,ldr-rgb-02.png,23.0937,0.0337,0.0231,2.8358\n+Small,8x8,ldr-rgb-03.png,37.2544,0.0141,0.0040,16.3229\n+Small,8x8,ldr-rgb-04.png,29.3419,0.0226,0.0123,5.3157\n+Small,8x8,ldr-rgb-05.png,25.7747,0.0369,0.0265,2.4767\n+Small,8x8,ldr-rgb-06.png,23.1468,0.0337,0.0230,2.8456\n+Small,8x8,ldr-rgb-07.png,29.6508,0.0235,0.0129,5.0716\n+Small,8x8,ldr-rgb-08.png,34.2150,0.0165,0.0063,10.3794\n+Small,8x8,ldr-rgb-09.png,28.6635,0.0239,0.0137,4.7893\n+Small,8x8,ldr-rgb-10.png,31.9974,0.0111,0.0031,5.3230\n+Small,8x8,ldr-rgba-00.png,25.3599,0.0356,0.0247,2.6536\n+Small,8x8,ldr-rgba-01.png,28.2399,0.0250,0.0145,4.5347\n+Small,8x8,ldr-rgba-02.png,23.9241,0.0348,0.0243,2.6999\n+Small,8x8,ldr-xy-00.png,33.3965,0.0222,0.0082,7.9825\n+Small,8x8,ldr-xy-01.png,34.2472,0.0230,0.0099,6.6365\n+Small,8x8,ldr-xy-02.png,39.9005,0.0171,0.0034,19.2856\n+Small,8x8,ldrs-rgba-00.png,25.3610,0.0358,0.0246,2.6674\n+Small,8x8,ldrs-rgba-01.png,28.2419,0.0253,0.0148,4.4221\n+Small,8x8,ldrs-rgba-02.png,23.9237,0.0346,0.0241,2.7237\n+Small,12x12,hdr-rgb-00.hdr,20.5637,0.1452,0.0400,1.6386\n+Small,12x12,ldr-rgb-00.png,23.7825,0.0318,0.0146,4.4838\n+Small,12x12,ldr-rgb-01.png,24.7588,0.0268,0.0099,6.6018\n+Small,12x12,ldr-rgb-02.png,19.2226,0.0427,0.0259,2.5343\n+Small,12x12,ldr-rgb-03.png,33.2364,0.0201,0.0035,18.5390\n+Small,12x12,ldr-rgb-04.png,24.5606,0.0270,0.0097,6.7293\n+Small,12x12,ldr-rgb-05.png,21.4885,0.0392,0.0218,3.0079\n+Small,12x12,ldr-rgb-06.png,19.2097,0.0447,0.0275,2.3800\n+Small,12x12,ldr-rgb-07.png,25.3456,0.0259,0.0084,7.7834\n+Small,12x12,ldr-rgb-08.png,30.1535,0.0214,0.0044,15.0593\n+Small,12x12,ldr-rgb-09.png,23.7136,0.0309,0.0140,4.6959\n+Small,12x12,ldr-rgb-10.png,27.3052,0.0182,0.0035,4.6091\n+Small,12x12,ldr-rgba-00.png,21.3632,0.0423,0.0251,2.6140\n+Small,12x12,ldr-rgba-01.png,24.4867,0.0291,0.0118,5.5402\n+Small,12x12,ldr-rgba-02.png,20.1666,0.0490,0.0318,2.0589\n+Small,12x12,ldr-xy-00.png,29.0651,0.0287,0.0072,9.0758\n+Small,12x12,ldr-xy-01.png,30.4241,0.0279,0.0066,9.9134\n+Small,12x12,ldr-xy-02.png,37.9951,0.0228,0.0018,35.6198\n+Small,12x12,ldrs-rgba-00.png,21.3658,0.0424,0.0249,2.6341\n+Small,12x12,ldrs-rgba-01.png,24.4885,0.0288,0.0112,5.8452\n+Small,12x12,ldrs-rgba-02.png,20.1665,0.0489,0.0320,2.0465\n+Small,3x3x3,ldr-l-00-3.dds,50.7548,0.0180,0.0117,22.4803\n+Small,3x3x3,ldr-l-01-3.dds,53.8643,0.0088,0.0049,13.9914\n+Small,6x6x6,ldr-l-00-3.dds,32.5264,0.0626,0.0403,6.4979\n+Small,6x6x6,ldr-l-01-3.dds,40.7893,0.0370,0.0165,4.1864\n" }, { "change_type": "ADD", "old_path": null, "new_path": "Test/Images/Small/astc_reference-main-avx2_fastest_results.csv", "diff": "+Image Set,Block Size,Name,PSNR,Total Time,Coding Time,Coding 
Rate\n+Small,4x4,hdr-rgb-00.hdr,33.2999,0.0992,0.0097,6.7584\n+Small,4x4,ldr-rgb-00.png,37.0084,0.0101,0.0057,11.5850\n+Small,4x4,ldr-rgb-01.png,39.4162,0.0097,0.0055,11.9263\n+Small,4x4,ldr-rgb-02.png,34.6448,0.0123,0.0081,8.0550\n+Small,4x4,ldr-rgb-03.png,45.3559,0.0067,0.0027,24.0931\n+Small,4x4,ldr-rgb-04.png,41.6390,0.0086,0.0042,15.7127\n+Small,4x4,ldr-rgb-05.png,36.7213,0.0124,0.0080,8.1615\n+Small,4x4,ldr-rgb-06.png,34.6994,0.0119,0.0075,8.7899\n+Small,4x4,ldr-rgb-07.png,37.1807,0.0122,0.0074,8.8119\n+Small,4x4,ldr-rgb-08.png,42.6915,0.0077,0.0036,18.3828\n+Small,4x4,ldr-rgb-09.png,41.6433,0.0088,0.0043,15.2017\n+Small,4x4,ldr-rgb-10.png,43.8818,0.0035,0.0015,10.5628\n+Small,4x4,ldr-rgba-00.png,34.5700,0.0117,0.0069,9.4966\n+Small,4x4,ldr-rgba-01.png,38.5857,0.0102,0.0057,11.4015\n+Small,4x4,ldr-rgba-02.png,34.5224,0.0127,0.0082,7.9449\n+Small,4x4,ldr-xy-00.png,37.5264,0.0109,0.0041,15.8532\n+Small,4x4,ldr-xy-01.png,43.8563,0.0112,0.0048,13.7936\n+Small,4x4,ldr-xy-02.png,48.1786,0.0105,0.0037,17.5003\n+Small,4x4,ldrs-rgba-00.png,34.5750,0.0118,0.0071,9.2096\n+Small,4x4,ldrs-rgba-01.png,38.6025,0.0104,0.0060,10.8699\n+Small,4x4,ldrs-rgba-02.png,34.5263,0.0125,0.0080,8.1583\n+Small,5x5,hdr-rgb-00.hdr,28.8076,0.1038,0.0118,5.5319\n+Small,5x5,ldr-rgb-00.png,33.4478,0.0117,0.0067,9.8417\n+Small,5x5,ldr-rgb-01.png,35.9999,0.0105,0.0056,11.6780\n+Small,5x5,ldr-rgb-02.png,30.8748,0.0139,0.0091,7.1939\n+Small,5x5,ldr-rgb-03.png,42.2547,0.0071,0.0025,26.2238\n+Small,5x5,ldr-rgb-04.png,36.9224,0.0103,0.0052,12.5212\n+Small,5x5,ldr-rgb-05.png,32.6193,0.0147,0.0098,6.6914\n+Small,5x5,ldr-rgb-06.png,30.8711,0.0142,0.0092,7.1336\n+Small,5x5,ldr-rgb-07.png,34.2652,0.0137,0.0084,7.7770\n+Small,5x5,ldr-rgb-08.png,39.3139,0.0083,0.0034,19.1180\n+Small,5x5,ldr-rgb-09.png,36.9835,0.0098,0.0049,13.3229\n+Small,5x5,ldr-rgb-10.png,39.7373,0.0042,0.0016,10.1917\n+Small,5x5,ldr-rgba-00.png,30.7473,0.0136,0.0082,7.9953\n+Small,5x5,ldr-rgba-01.png,34.9846,0.0116,0.0067,9.8463\n+Small,5x5,ldr-rgba-02.png,31.0020,0.0150,0.0100,6.5530\n+Small,5x5,ldr-xy-00.png,36.5566,0.0122,0.0043,15.3623\n+Small,5x5,ldr-xy-01.png,38.8331,0.0121,0.0047,13.9172\n+Small,5x5,ldr-xy-02.png,43.7233,0.0116,0.0040,16.5579\n+Small,5x5,ldrs-rgba-00.png,30.7497,0.0139,0.0085,7.7367\n+Small,5x5,ldrs-rgba-01.png,34.9913,0.0116,0.0065,10.1259\n+Small,5x5,ldrs-rgba-02.png,31.0030,0.0153,0.0101,6.4632\n+Small,6x6,hdr-rgb-00.hdr,26.7122,0.1055,0.0123,5.3208\n+Small,6x6,ldr-rgb-00.png,31.1052,0.0129,0.0075,8.7172\n+Small,6x6,ldr-rgb-01.png,32.7949,0.0113,0.0062,10.5706\n+Small,6x6,ldr-rgb-02.png,27.3228,0.0151,0.0098,6.7038\n+Small,6x6,ldr-rgb-03.png,40.2037,0.0076,0.0026,25.4211\n+Small,6x6,ldr-rgb-04.png,33.7349,0.0108,0.0055,11.8702\n+Small,6x6,ldr-rgb-05.png,29.5304,0.0159,0.0104,6.2737\n+Small,6x6,ldr-rgb-06.png,27.3791,0.0154,0.0100,6.5536\n+Small,6x6,ldr-rgb-07.png,32.0586,0.0139,0.0080,8.2435\n+Small,6x6,ldr-rgb-08.png,37.0422,0.0086,0.0034,19.2236\n+Small,6x6,ldr-rgb-09.png,33.2804,0.0107,0.0055,12.0165\n+Small,6x6,ldr-rgb-10.png,36.4903,0.0047,0.0019,8.7593\n+Small,6x6,ldr-rgba-00.png,28.4787,0.0152,0.0094,6.9556\n+Small,6x6,ldr-rgba-01.png,31.9580,0.0124,0.0069,9.5311\n+Small,6x6,ldr-rgba-02.png,27.7571,0.0159,0.0103,6.3332\n+Small,6x6,ldr-xy-00.png,35.5281,0.0121,0.0042,15.5959\n+Small,6x6,ldr-xy-01.png,36.2504,0.0127,0.0051,12.9336\n+Small,6x6,ldr-xy-02.png,41.7325,0.0113,0.0034,19.1286\n+Small,6x6,ldrs-rgba-00.png,28.4800,0.0153,0.0097,6.7286\n+Small,6x6,ldrs-rgba-01.png,31.9643,0.0124,0.0069,9.4966\n+Small,6x6,ldrs-rgba-02.png,27.
7547,0.0160,0.0107,6.1283\n+Small,8x8,hdr-rgb-00.hdr,23.5547,0.1164,0.0202,3.2378\n+Small,8x8,ldr-rgb-00.png,27.6188,0.0204,0.0117,5.5938\n+Small,8x8,ldr-rgb-01.png,28.6671,0.0184,0.0098,6.6547\n+Small,8x8,ldr-rgb-02.png,23.0179,0.0245,0.0160,4.1081\n+Small,8x8,ldr-rgb-03.png,37.0089,0.0117,0.0033,19.9984\n+Small,8x8,ldr-rgb-04.png,29.0953,0.0178,0.0089,7.3860\n+Small,8x8,ldr-rgb-05.png,25.5324,0.0249,0.0161,4.0693\n+Small,8x8,ldr-rgb-06.png,23.0523,0.0251,0.0160,4.1027\n+Small,8x8,ldr-rgb-07.png,29.3368,0.0182,0.0089,7.3884\n+Small,8x8,ldr-rgb-08.png,33.7665,0.0131,0.0044,14.7696\n+Small,8x8,ldr-rgb-09.png,28.4590,0.0175,0.0087,7.4907\n+Small,8x8,ldr-rgb-10.png,31.8847,0.0090,0.0027,6.1041\n+Small,8x8,ldr-rgba-00.png,24.9726,0.0243,0.0153,4.2963\n+Small,8x8,ldr-rgba-01.png,28.1236,0.0192,0.0104,6.2973\n+Small,8x8,ldr-rgba-02.png,23.8676,0.0258,0.0169,3.8735\n+Small,8x8,ldr-xy-00.png,33.2085,0.0180,0.0060,10.9118\n+Small,8x8,ldr-xy-01.png,33.9809,0.0184,0.0068,9.6067\n+Small,8x8,ldr-xy-02.png,39.7786,0.0147,0.0030,21.5641\n+Small,8x8,ldrs-rgba-00.png,24.9730,0.0245,0.0154,4.2680\n+Small,8x8,ldrs-rgba-01.png,28.1260,0.0195,0.0108,6.0630\n+Small,8x8,ldrs-rgba-02.png,23.8671,0.0265,0.0177,3.6924\n+Small,12x12,hdr-rgb-00.hdr,20.4789,0.1258,0.0250,2.6237\n+Small,12x12,ldr-rgb-00.png,23.6716,0.0229,0.0100,6.5608\n+Small,12x12,ldr-rgb-01.png,24.7037,0.0197,0.0068,9.6263\n+Small,12x12,ldr-rgb-02.png,19.1880,0.0298,0.0168,3.8965\n+Small,12x12,ldr-rgb-03.png,33.1400,0.0153,0.0027,24.1290\n+Small,12x12,ldr-rgb-04.png,24.3898,0.0202,0.0069,9.4678\n+Small,12x12,ldr-rgb-05.png,21.3701,0.0277,0.0147,4.4551\n+Small,12x12,ldr-rgb-06.png,19.1564,0.0316,0.0185,3.5464\n+Small,12x12,ldr-rgb-07.png,25.2598,0.0197,0.0064,10.2916\n+Small,12x12,ldr-rgb-08.png,29.9653,0.0166,0.0035,18.7247\n+Small,12x12,ldr-rgb-09.png,23.5968,0.0224,0.0097,6.7273\n+Small,12x12,ldr-rgb-10.png,27.2554,0.0130,0.0025,6.4868\n+Small,12x12,ldr-rgba-00.png,21.1828,0.0287,0.0152,4.3124\n+Small,12x12,ldr-rgba-01.png,24.4330,0.0218,0.0088,7.4573\n+Small,12x12,ldr-rgba-02.png,20.1371,0.0351,0.0220,2.9854\n+Small,12x12,ldr-xy-00.png,28.9405,0.0228,0.0058,11.2821\n+Small,12x12,ldr-xy-01.png,29.5972,0.0219,0.0056,11.7660\n+Small,12x12,ldr-xy-02.png,37.9716,0.0184,0.0019,35.2724\n+Small,12x12,ldrs-rgba-00.png,21.1836,0.0291,0.0158,4.1460\n+Small,12x12,ldrs-rgba-01.png,24.4339,0.0218,0.0089,7.3512\n+Small,12x12,ldrs-rgba-02.png,20.1367,0.0352,0.0222,2.9486\n+Small,3x3x3,ldr-l-00-3.dds,50.6002,0.0160,0.0106,24.7582\n+Small,3x3x3,ldr-l-01-3.dds,53.7952,0.0077,0.0045,15.2217\n+Small,6x6x6,ldr-l-00-3.dds,32.5074,0.0602,0.0398,6.5857\n+Small,6x6x6,ldr-l-01-3.dds,40.7826,0.0341,0.0160,4.2987\n" }, { "change_type": "ADD", "old_path": null, "new_path": "Test/Images/Small/astc_reference-main-avx2_medium_results.csv", "diff": "+Image Set,Block Size,Name,PSNR,Total Time,Coding Time,Coding 
Rate\n+Small,4x4,hdr-rgb-00.hdr,34.1340,0.1196,0.0273,2.4014\n+Small,4x4,ldr-rgb-00.png,38.7318,0.0343,0.0285,2.3006\n+Small,4x4,ldr-rgb-01.png,40.1460,0.0315,0.0260,2.5244\n+Small,4x4,ldr-rgb-02.png,35.2180,0.0326,0.0270,2.4302\n+Small,4x4,ldr-rgb-03.png,47.1925,0.0198,0.0144,4.5653\n+Small,4x4,ldr-rgb-04.png,42.1729,0.0270,0.0212,3.0947\n+Small,4x4,ldr-rgb-05.png,37.7672,0.0347,0.0289,2.2641\n+Small,4x4,ldr-rgb-06.png,35.3468,0.0295,0.0236,2.7724\n+Small,4x4,ldr-rgb-07.png,39.2465,0.0405,0.0344,1.9069\n+Small,4x4,ldr-rgb-08.png,45.1731,0.0232,0.0175,3.7470\n+Small,4x4,ldr-rgb-09.png,42.0914,0.0275,0.0218,3.0119\n+Small,4x4,ldr-rgb-10.png,44.8767,0.0070,0.0036,4.4780\n+Small,4x4,ldr-rgba-00.png,36.2349,0.0369,0.0307,2.1347\n+Small,4x4,ldr-rgba-01.png,38.8941,0.0239,0.0178,3.6901\n+Small,4x4,ldr-rgba-02.png,34.9204,0.0262,0.0203,3.2236\n+Small,4x4,ldr-xy-00.png,37.7446,0.0243,0.0167,3.9292\n+Small,4x4,ldr-xy-01.png,45.1471,0.0329,0.0257,2.5483\n+Small,4x4,ldr-xy-02.png,50.9225,0.0394,0.0316,2.0714\n+Small,4x4,ldrs-rgba-00.png,36.2418,0.0374,0.0310,2.1118\n+Small,4x4,ldrs-rgba-01.png,38.9101,0.0239,0.0179,3.6543\n+Small,4x4,ldrs-rgba-02.png,34.9259,0.0265,0.0205,3.2047\n+Small,5x5,hdr-rgb-00.hdr,29.8514,0.1314,0.0373,1.7546\n+Small,5x5,ldr-rgb-00.png,35.0732,0.0417,0.0342,1.9145\n+Small,5x5,ldr-rgb-01.png,36.3975,0.0352,0.0281,2.3336\n+Small,5x5,ldr-rgb-02.png,31.0670,0.0373,0.0299,2.1945\n+Small,5x5,ldr-rgb-03.png,43.9478,0.0171,0.0099,6.6292\n+Small,5x5,ldr-rgb-04.png,37.6668,0.0328,0.0254,2.5753\n+Small,5x5,ldr-rgb-05.png,33.5193,0.0467,0.0394,1.6636\n+Small,5x5,ldr-rgb-06.png,31.0773,0.0354,0.0279,2.3454\n+Small,5x5,ldr-rgb-07.png,36.2042,0.0500,0.0423,1.5477\n+Small,5x5,ldr-rgb-08.png,41.5462,0.0248,0.0175,3.7490\n+Small,5x5,ldr-rgb-09.png,37.5873,0.0308,0.0235,2.7904\n+Small,5x5,ldr-rgb-10.png,40.4902,0.0095,0.0045,3.6037\n+Small,5x5,ldr-rgba-00.png,32.7858,0.0495,0.0417,1.5710\n+Small,5x5,ldr-rgba-01.png,35.2722,0.0298,0.0223,2.9446\n+Small,5x5,ldr-rgba-02.png,31.1225,0.0345,0.0269,2.4318\n+Small,5x5,ldr-xy-00.png,37.0727,0.0268,0.0176,3.7211\n+Small,5x5,ldr-xy-01.png,40.7193,0.0349,0.0261,2.5116\n+Small,5x5,ldr-xy-02.png,49.0675,0.0280,0.0186,3.5233\n+Small,5x5,ldrs-rgba-00.png,32.7887,0.0500,0.0423,1.5511\n+Small,5x5,ldrs-rgba-01.png,35.2789,0.0295,0.0222,2.9569\n+Small,5x5,ldrs-rgba-02.png,31.1234,0.0346,0.0270,2.4313\n+Small,6x6,hdr-rgb-00.hdr,27.4769,0.1381,0.0428,1.5325\n+Small,6x6,ldr-rgb-00.png,32.3560,0.0450,0.0370,1.7719\n+Small,6x6,ldr-rgb-01.png,33.0687,0.0374,0.0297,2.2094\n+Small,6x6,ldr-rgb-02.png,27.4381,0.0408,0.0331,1.9804\n+Small,6x6,ldr-rgb-03.png,41.6538,0.0156,0.0080,8.1992\n+Small,6x6,ldr-rgb-04.png,34.2059,0.0340,0.0263,2.4921\n+Small,6x6,ldr-rgb-05.png,30.1377,0.0517,0.0438,1.4958\n+Small,6x6,ldr-rgb-06.png,27.5024,0.0404,0.0324,2.0197\n+Small,6x6,ldr-rgb-07.png,33.8577,0.0496,0.0408,1.6045\n+Small,6x6,ldr-rgb-08.png,39.0752,0.0216,0.0140,4.6965\n+Small,6x6,ldr-rgb-09.png,33.6699,0.0328,0.0252,2.6056\n+Small,6x6,ldr-rgb-10.png,36.9333,0.0111,0.0056,2.8909\n+Small,6x6,ldr-rgba-00.png,30.2558,0.0565,0.0481,1.3618\n+Small,6x6,ldr-rgba-01.png,32.1716,0.0322,0.0240,2.7252\n+Small,6x6,ldr-rgba-02.png,27.8426,0.0397,0.0317,2.0671\n+Small,6x6,ldr-xy-00.png,36.2518,0.0264,0.0164,3.9934\n+Small,6x6,ldr-xy-01.png,37.7329,0.0296,0.0203,3.2235\n+Small,6x6,ldr-xy-02.png,46.0638,0.0233,0.0133,4.9102\n+Small,6x6,ldrs-rgba-00.png,30.2570,0.0563,0.0480,1.3666\n+Small,6x6,ldrs-rgba-01.png,32.1777,0.0324,0.0243,2.6989\n+Small,6x6,ldrs-rgba-02.png,27.8427,0.0394,0.0314,2.0875\n+Sm
all,8x8,hdr-rgb-00.hdr,24.1157,0.1540,0.0543,1.2065\n+Small,8x8,ldr-rgb-00.png,28.7123,0.0612,0.0497,1.3198\n+Small,8x8,ldr-rgb-01.png,28.9240,0.0482,0.0369,1.7780\n+Small,8x8,ldr-rgb-02.png,23.1517,0.0555,0.0442,1.4837\n+Small,8x8,ldr-rgb-03.png,38.4615,0.0205,0.0094,7.0077\n+Small,8x8,ldr-rgb-04.png,29.6546,0.0457,0.0344,1.9039\n+Small,8x8,ldr-rgb-05.png,25.9295,0.0631,0.0516,1.2709\n+Small,8x8,ldr-rgb-06.png,23.1918,0.0553,0.0437,1.4993\n+Small,8x8,ldr-rgb-07.png,30.5823,0.0559,0.0440,1.4887\n+Small,8x8,ldr-rgb-08.png,35.7745,0.0272,0.0161,4.0790\n+Small,8x8,ldr-rgb-09.png,29.0115,0.0432,0.0321,2.0435\n+Small,8x8,ldr-rgb-10.png,32.1619,0.0165,0.0073,2.2378\n+Small,8x8,ldr-rgba-00.png,26.3940,0.0711,0.0591,1.1084\n+Small,8x8,ldr-rgba-01.png,28.3383,0.0426,0.0310,2.1149\n+Small,8x8,ldr-rgba-02.png,23.9368,0.0585,0.0468,1.4014\n+Small,8x8,ldr-xy-00.png,34.0144,0.0385,0.0236,2.7815\n+Small,8x8,ldr-xy-01.png,34.9127,0.0376,0.0230,2.8443\n+Small,8x8,ldr-xy-02.png,41.7218,0.0288,0.0139,4.7288\n+Small,8x8,ldrs-rgba-00.png,26.3943,0.0711,0.0592,1.1067\n+Small,8x8,ldrs-rgba-01.png,28.3412,0.0421,0.0306,2.1394\n+Small,8x8,ldrs-rgba-02.png,23.9363,0.0590,0.0473,1.3852\n+Small,12x12,hdr-rgb-00.hdr,20.7645,0.1795,0.0715,0.9162\n+Small,12x12,ldr-rgb-00.png,24.6670,0.0758,0.0558,1.1744\n+Small,12x12,ldr-rgb-01.png,25.0227,0.0627,0.0432,1.5186\n+Small,12x12,ldr-rgb-02.png,19.2621,0.0839,0.0640,1.0244\n+Small,12x12,ldr-rgb-03.png,34.9308,0.0287,0.0100,6.5622\n+Small,12x12,ldr-rgb-04.png,24.9126,0.0598,0.0399,1.6423\n+Small,12x12,ldr-rgb-05.png,21.6505,0.0863,0.0662,0.9896\n+Small,12x12,ldr-rgb-06.png,19.2451,0.0805,0.0607,1.0796\n+Small,12x12,ldr-rgb-07.png,26.5693,0.0632,0.0429,1.5265\n+Small,12x12,ldr-rgb-08.png,31.4927,0.0365,0.0165,3.9779\n+Small,12x12,ldr-rgb-09.png,24.1078,0.0598,0.0405,1.6183\n+Small,12x12,ldr-rgb-10.png,27.9831,0.0321,0.0147,1.1085\n+Small,12x12,ldr-rgba-00.png,22.0878,0.0843,0.0639,1.0259\n+Small,12x12,ldr-rgba-01.png,24.6267,0.0600,0.0400,1.6367\n+Small,12x12,ldr-rgba-02.png,20.1804,0.0909,0.0708,0.9254\n+Small,12x12,ldr-xy-00.png,30.0722,0.0496,0.0254,2.5766\n+Small,12x12,ldr-xy-01.png,31.8391,0.0480,0.0244,2.6836\n+Small,12x12,ldr-xy-02.png,38.5216,0.0297,0.0057,11.5340\n+Small,12x12,ldrs-rgba-00.png,22.0882,0.0850,0.0647,1.0137\n+Small,12x12,ldrs-rgba-01.png,24.6285,0.0602,0.0402,1.6313\n+Small,12x12,ldrs-rgba-02.png,20.1803,0.0913,0.0713,0.9192\n+Small,3x3x3,ldr-l-00-3.dds,51.9676,0.0249,0.0173,15.1845\n+Small,3x3x3,ldr-l-01-3.dds,54.3363,0.0100,0.0052,13.1884\n+Small,6x6x6,ldr-l-00-3.dds,32.9129,0.0736,0.0508,5.1627\n+Small,6x6x6,ldr-l-01-3.dds,40.8871,0.0375,0.0171,4.0394\n" }, { "change_type": "ADD", "old_path": null, "new_path": "Test/Images/Small/astc_reference-main-avx2_thorough_results.csv", "diff": "+Image Set,Block Size,Name,PSNR,Total Time,Coding Time,Coding 
Rate\n+Small,4x4,hdr-rgb-00.hdr,34.3787,0.1484,0.0555,1.1816\n+Small,4x4,ldr-rgb-00.png,39.1025,0.0668,0.0606,1.0814\n+Small,4x4,ldr-rgb-01.png,40.3344,0.0656,0.0599,1.0934\n+Small,4x4,ldr-rgb-02.png,35.3612,0.0620,0.0562,1.1664\n+Small,4x4,ldr-rgb-03.png,47.6672,0.0606,0.0550,1.1914\n+Small,4x4,ldr-rgb-04.png,42.3135,0.0604,0.0544,1.2049\n+Small,4x4,ldr-rgb-05.png,37.9467,0.0696,0.0637,1.0287\n+Small,4x4,ldr-rgb-06.png,35.4785,0.0573,0.0513,1.2766\n+Small,4x4,ldr-rgb-07.png,39.8676,0.0761,0.0697,0.9402\n+Small,4x4,ldr-rgb-08.png,45.7996,0.0584,0.0526,1.2451\n+Small,4x4,ldr-rgb-09.png,42.2360,0.0635,0.0577,1.1362\n+Small,4x4,ldr-rgb-10.png,44.9865,0.0110,0.0075,2.1545\n+Small,4x4,ldr-rgba-00.png,36.7343,0.0689,0.0626,1.0469\n+Small,4x4,ldr-rgba-01.png,39.0299,0.0569,0.0507,1.2935\n+Small,4x4,ldr-rgba-02.png,34.9857,0.0553,0.0493,1.3300\n+Small,4x4,ldr-xy-00.png,37.7553,0.0653,0.0576,1.1374\n+Small,4x4,ldr-xy-01.png,45.2174,0.0728,0.0655,1.0003\n+Small,4x4,ldr-xy-02.png,50.9988,0.0878,0.0798,0.8213\n+Small,4x4,ldrs-rgba-00.png,36.7417,0.0694,0.0631,1.0391\n+Small,4x4,ldrs-rgba-01.png,39.0514,0.0573,0.0512,1.2798\n+Small,4x4,ldrs-rgba-02.png,34.9915,0.0559,0.0499,1.3140\n+Small,5x5,hdr-rgb-00.hdr,30.2499,0.1629,0.0676,0.9696\n+Small,5x5,ldr-rgb-00.png,35.3184,0.0820,0.0743,0.8815\n+Small,5x5,ldr-rgb-01.png,36.4942,0.0784,0.0711,0.9222\n+Small,5x5,ldr-rgb-02.png,31.1169,0.0742,0.0668,0.9809\n+Small,5x5,ldr-rgb-03.png,44.4730,0.0718,0.0648,1.0121\n+Small,5x5,ldr-rgb-04.png,37.8146,0.0736,0.0661,0.9916\n+Small,5x5,ldr-rgb-05.png,33.6567,0.0837,0.0762,0.8596\n+Small,5x5,ldr-rgb-06.png,31.1197,0.0706,0.0631,1.0392\n+Small,5x5,ldr-rgb-07.png,36.7056,0.0942,0.0864,0.7589\n+Small,5x5,ldr-rgb-08.png,42.2683,0.0685,0.0611,1.0721\n+Small,5x5,ldr-rgb-09.png,37.6844,0.0769,0.0695,0.9425\n+Small,5x5,ldr-rgb-10.png,40.6793,0.0151,0.0098,1.6517\n+Small,5x5,ldr-rgba-00.png,33.1233,0.0853,0.0775,0.8452\n+Small,5x5,ldr-rgba-01.png,35.3456,0.0705,0.0629,1.0414\n+Small,5x5,ldr-rgba-02.png,31.1599,0.0698,0.0623,1.0522\n+Small,5x5,ldr-xy-00.png,37.2902,0.0723,0.0631,1.0390\n+Small,5x5,ldr-xy-01.png,41.5200,0.0884,0.0794,0.8251\n+Small,5x5,ldr-xy-02.png,49.2646,0.0992,0.0898,0.7297\n+Small,5x5,ldrs-rgba-00.png,33.1269,0.0857,0.0779,0.8412\n+Small,5x5,ldrs-rgba-01.png,35.3541,0.0709,0.0632,1.0368\n+Small,5x5,ldrs-rgba-02.png,31.1602,0.0705,0.0631,1.0394\n+Small,6x6,hdr-rgb-00.hdr,27.6566,0.1752,0.0800,0.8195\n+Small,6x6,ldr-rgb-00.png,32.6208,0.0956,0.0873,0.7509\n+Small,6x6,ldr-rgb-01.png,33.1469,0.0885,0.0807,0.8119\n+Small,6x6,ldr-rgb-02.png,27.4754,0.0853,0.0773,0.8483\n+Small,6x6,ldr-rgb-03.png,42.5226,0.0611,0.0533,1.2294\n+Small,6x6,ldr-rgb-04.png,34.3223,0.0839,0.0758,0.8644\n+Small,6x6,ldr-rgb-05.png,30.2576,0.0977,0.0896,0.7312\n+Small,6x6,ldr-rgb-06.png,27.5394,0.0840,0.0758,0.8646\n+Small,6x6,ldr-rgb-07.png,34.3854,0.1026,0.0942,0.6960\n+Small,6x6,ldr-rgb-08.png,39.8830,0.0600,0.0522,1.2563\n+Small,6x6,ldr-rgb-09.png,33.7955,0.0823,0.0741,0.8841\n+Small,6x6,ldr-rgb-10.png,37.1039,0.0186,0.0129,1.2635\n+Small,6x6,ldr-rgba-00.png,30.5063,0.1018,0.0931,0.7038\n+Small,6x6,ldr-rgba-01.png,32.2316,0.0844,0.0761,0.8610\n+Small,6x6,ldr-rgba-02.png,27.8753,0.0861,0.0780,0.8406\n+Small,6x6,ldr-xy-00.png,36.3838,0.0616,0.0517,1.2681\n+Small,6x6,ldr-xy-01.png,38.0618,0.0874,0.0778,0.8427\n+Small,6x6,ldr-xy-02.png,47.4735,0.1078,0.0976,0.6712\n+Small,6x6,ldrs-rgba-00.png,30.5081,0.1021,0.0937,0.6997\n+Small,6x6,ldrs-rgba-01.png,32.2377,0.0842,0.0760,0.8625\n+Small,6x6,ldrs-rgba-02.png,27.8745,0.0867,0.0786,0.8337\n+Sm
all,8x8,hdr-rgb-00.hdr,24.3160,0.2024,0.1026,0.6390\n+Small,8x8,ldr-rgb-00.png,28.9377,0.1215,0.1080,0.6070\n+Small,8x8,ldr-rgb-01.png,28.9883,0.1074,0.0946,0.6931\n+Small,8x8,ldr-rgb-02.png,23.1866,0.1130,0.1001,0.6545\n+Small,8x8,ldr-rgb-03.png,39.3501,0.0416,0.0288,2.2734\n+Small,8x8,ldr-rgb-04.png,29.7749,0.1013,0.0879,0.7452\n+Small,8x8,ldr-rgb-05.png,26.0242,0.1193,0.1062,0.6173\n+Small,8x8,ldr-rgb-06.png,23.2335,0.1113,0.0982,0.6673\n+Small,8x8,ldr-rgb-07.png,31.1039,0.1148,0.1011,0.6479\n+Small,8x8,ldr-rgb-08.png,36.4625,0.0588,0.0457,1.4345\n+Small,8x8,ldr-rgb-09.png,29.1520,0.0869,0.0738,0.8878\n+Small,8x8,ldr-rgb-10.png,32.3031,0.0281,0.0174,0.9332\n+Small,8x8,ldr-rgba-00.png,26.6704,0.1271,0.1136,0.5767\n+Small,8x8,ldr-rgba-01.png,28.3932,0.1004,0.0870,0.7534\n+Small,8x8,ldr-rgba-02.png,23.9599,0.1194,0.1062,0.6169\n+Small,8x8,ldr-xy-00.png,34.3120,0.0725,0.0574,1.1426\n+Small,8x8,ldr-xy-01.png,35.2097,0.0750,0.0600,1.0923\n+Small,8x8,ldr-xy-02.png,44.5881,0.0956,0.0805,0.8146\n+Small,8x8,ldrs-rgba-00.png,26.6715,0.1276,0.1141,0.5746\n+Small,8x8,ldrs-rgba-01.png,28.3959,0.1001,0.0869,0.7543\n+Small,8x8,ldrs-rgba-02.png,23.9597,0.1195,0.1061,0.6177\n+Small,12x12,hdr-rgb-00.hdr,21.0019,0.2435,0.1336,0.4906\n+Small,12x12,ldr-rgb-00.png,25.0133,0.1529,0.1299,0.5043\n+Small,12x12,ldr-rgb-01.png,25.1167,0.1283,0.1053,0.6223\n+Small,12x12,ldr-rgb-02.png,19.2912,0.1531,0.1304,0.5026\n+Small,12x12,ldr-rgb-03.png,36.1206,0.0513,0.0286,2.2900\n+Small,12x12,ldr-rgb-04.png,25.0074,0.1265,0.1036,0.6327\n+Small,12x12,ldr-rgb-05.png,21.7140,0.1515,0.1286,0.5095\n+Small,12x12,ldr-rgb-06.png,19.2810,0.1485,0.1253,0.5230\n+Small,12x12,ldr-rgb-07.png,27.0419,0.1370,0.1138,0.5760\n+Small,12x12,ldr-rgb-08.png,32.3840,0.0679,0.0451,1.4543\n+Small,12x12,ldr-rgb-09.png,24.3053,0.1206,0.0976,0.6715\n+Small,12x12,ldr-rgb-10.png,28.1356,0.0483,0.0283,0.5740\n+Small,12x12,ldr-rgba-00.png,22.7010,0.1623,0.1389,0.4717\n+Small,12x12,ldr-rgba-01.png,24.7078,0.1210,0.0979,0.6697\n+Small,12x12,ldr-rgba-02.png,20.1958,0.1560,0.1330,0.4928\n+Small,12x12,ldr-xy-00.png,30.5742,0.0978,0.0730,0.8979\n+Small,12x12,ldr-xy-01.png,32.1255,0.0744,0.0499,1.3134\n+Small,12x12,ldr-xy-02.png,40.3053,0.0580,0.0332,1.9760\n+Small,12x12,ldrs-rgba-00.png,22.7006,0.1629,0.1395,0.4696\n+Small,12x12,ldrs-rgba-01.png,24.7093,0.1209,0.0978,0.6701\n+Small,12x12,ldrs-rgba-02.png,20.1960,0.1553,0.1326,0.4944\n+Small,3x3x3,ldr-l-00-3.dds,52.4153,0.0465,0.0392,6.6802\n+Small,3x3x3,ldr-l-01-3.dds,55.4037,0.0207,0.0159,4.3404\n+Small,6x6x6,ldr-l-00-3.dds,33.2725,0.1090,0.0835,3.1409\n+Small,6x6x6,ldr-l-01-3.dds,41.5751,0.0416,0.0191,3.6060\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_test_image.py", "new_path": "Test/astc_test_image.py", "diff": "@@ -310,7 +310,7 @@ def parse_command_line():\nparser.add_argument(\"--encoder\", dest=\"encoders\", default=\"avx2\",\nchoices=coders, help=\"test encoder variant\")\n- parser.add_argument(\"--reference\", dest=\"reference\", default=\"ref-3.5-avx2\",\n+ parser.add_argument(\"--reference\", dest=\"reference\", default=\"ref-main-avx2\",\nchoices=refcoders, help=\"reference encoder variant\")\nastcProfile = [\"ldr\", \"ldrs\", \"hdr\", \"all\"]\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Update main test reference
61,745
03.04.2022 14:09:13
-3,600
ed8ff199d8fd2c81ab441d8f81fe0ef2957e44fc
Sort block_mode storage into bands
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -759,24 +759,23 @@ static void assign_kmeans_texels(\n* @param y_weights The number of weights in the Y dimension.\n* @param bsd The block size descriptor we are populating.\n* @param wb The decimation table init scratch working buffers.\n- *\n- * @return The new entry's index in the compacted decimation table array.\n+ * @param index The packed array index to populate.\n*/\n-static int construct_dt_entry_2d(\n+void construct_dt_entry_2d(\nunsigned int x_texels,\nunsigned int y_texels,\nunsigned int x_weights,\nunsigned int y_weights,\nblock_size_descriptor& bsd,\n- dt_init_working_buffers& wb\n+ dt_init_working_buffers& wb,\n+ unsigned int index\n) {\n- unsigned int dm_index = bsd.decimation_mode_count;\nunsigned int weight_count = x_weights * y_weights;\nassert(weight_count <= BLOCK_MAX_WEIGHTS);\nbool try_2planes = (2 * weight_count) <= BLOCK_MAX_WEIGHTS;\n- decimation_info& di = bsd.decimation_tables[dm_index];\n+ decimation_info& di = bsd.decimation_tables[index];\ninit_decimation_info_2d(x_texels, y_texels, x_weights, y_weights, di, wb);\nint maxprec_1plane = -1;\n@@ -801,14 +800,8 @@ static int construct_dt_entry_2d(\n// At least one of the two should be valid ...\nassert(maxprec_1plane >= 0 || maxprec_2planes >= 0);\n- bsd.decimation_modes[dm_index].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\n- bsd.decimation_modes[dm_index].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\n-\n- // Default to not enabled - we'll populate these based on active block modes\n- bsd.decimation_modes[dm_index].percentile_hit = false;\n-\n- bsd.decimation_mode_count++;\n- return dm_index;\n+ bsd.decimation_modes[index].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\n+ bsd.decimation_modes[index].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\n}\n/**\n@@ -838,7 +831,6 @@ static void construct_block_size_descriptor_2d(\nbsd.ydim = static_cast<uint8_t>(y_texels);\nbsd.zdim = 1;\nbsd.texel_count = static_cast<uint8_t>(x_texels * y_texels);\n- bsd.decimation_mode_count = 0;\nfor (unsigned int i = 0; i < MAX_DMI; i++)\n{\n@@ -848,7 +840,7 @@ static void construct_block_size_descriptor_2d(\n// Gather all the decimation grids that can be used with the current block\n#if !defined(ASTCENC_DECOMPRESS_ONLY)\nconst float *percentiles = get_2d_percentile_table(x_texels, y_texels);\n- float always_threshold = 0.0f;\n+ float always_cutoff = 0.0f;\n#else\n// Unused in decompress-only builds\n(void)can_omit_modes;\n@@ -856,121 +848,140 @@ static void construct_block_size_descriptor_2d(\n#endif\n// Construct the list of block formats referencing the decimation tables\n- unsigned int packed_idx = 0;\n- unsigned int always_block_mode_count = 0;\n- unsigned int always_decimation_mode_count = 0;\n+ unsigned int packed_bm_idx = 0;\n+ unsigned int packed_dm_idx = 0;\n- // Iterate twice; first time keep the \"always\" blocks, second time keep the \"non-always\" blocks.\n- // This ensures that the always block modes and decimation modes are at the start of the list.\n- for (unsigned int j = 0; j < 2; j ++)\n+ // Trackers\n+ unsigned int bm_counts[4] { 0 };\n+ unsigned int dm_counts[4] { 0 };\n+\n+ // Clear the list to a known-bad value\n+ for (unsigned int i = 0; i < WEIGHTS_MAX_BLOCK_MODES; i++)\n+ {\n+ bsd.block_mode_packed_index[i] = BLOCK_BAD_BLOCK_MODE;\n+ }\n+\n+ // Iterate four times to build a usefully ordered list:\n+ // - Pass 0 - keep selected single plane 
\"always\" block modes\n+ // - Pass 1 - keep selected single plane \"non-always\" block modes\n+ // - Pass 2 - keep select dual plane block modes\n+ // - Pass 3 - keep everything else that's legal\n+ unsigned int limit = can_omit_modes ? 3 : 4;\n+ for (unsigned int j = 0; j < limit; j ++)\n{\nfor (unsigned int i = 0; i < WEIGHTS_MAX_BLOCK_MODES; i++)\n{\n+ // Skip modes we've already included in a previous pass\n+ if (bsd.block_mode_packed_index[i] != BLOCK_BAD_BLOCK_MODE)\n+ {\n+ continue;\n+ }\n+\n+ // Decode parameters\nunsigned int x_weights;\nunsigned int y_weights;\nbool is_dual_plane;\nunsigned int quant_mode;\nunsigned int weight_bits;\n+ bool valid = decode_block_mode_2d(i, x_weights, y_weights, is_dual_plane, quant_mode, weight_bits);\n- #if !defined(ASTCENC_DECOMPRESS_ONLY)\n- float percentile = percentiles[i];\n- bool selected = (percentile <= mode_cutoff) || !can_omit_modes;\n-\n- if (j == 0 && percentile > always_threshold)\n+ // Always skip invalid encodings for the current block size\n+ if (!valid || (x_weights > x_texels) || (y_weights > y_texels))\n{\ncontinue;\n}\n- if (j == 1 && percentile <= always_threshold)\n+ // Selectively skip dual plane encodings\n+ if (((j <= 1) && is_dual_plane) || (j == 2 && !is_dual_plane))\n{\ncontinue;\n}\n- #else\n- // Decompressor builds can never discard modes, as we cannot make any\n- // assumptions about the modes the original compressor used\n- bool selected = true;\n-\n- if (j == 1)\n+ // Always skip encodings we can't physically encode based on\n+ // generic encoding bit availability\n+ if (is_dual_plane)\n{\n- continue;\n- }\n- #endif\n-\n- // ASSUMPTION: No compressor will use more weights in a dimension than\n- // the block has actual texels, because it wastes bits. Decompression\n- // of an image which violates this assumption will fail, even though it\n- // is technically permitted by the specification.\n-\n- // Skip modes that are invalid, too large, or not selected by heuristic\n- bool valid = decode_block_mode_2d(i, x_weights, y_weights, is_dual_plane, quant_mode, weight_bits);\n- if (!selected || !valid || (x_weights > x_texels) || (y_weights > y_texels))\n+ // This is the only check we need as only support 1 partition\n+ if ((109 - weight_bits) <= 0)\n{\n- bsd.block_mode_packed_index[i] = BLOCK_BAD_BLOCK_MODE;\ncontinue;\n}\n-\n- // Allocate and initialize the decimation table entry if we've not used it yet\n- int decimation_mode = decimation_mode_index[y_weights * 16 + x_weights];\n- if (decimation_mode < 0)\n+ }\n+ else\n{\n- decimation_mode = construct_dt_entry_2d(x_texels, y_texels, x_weights, y_weights, bsd, *wb);\n- decimation_mode_index[y_weights * 16 + x_weights] = decimation_mode;\n-\n- #if !defined(ASTCENC_DECOMPRESS_ONLY)\n- if (percentile <= always_threshold)\n+ // This is conservative - fewer bits may be available for > 1 partition\n+ if ((111 - weight_bits) <= 0)\n{\n- always_decimation_mode_count++;\n+ continue;\n}\n- #endif\n}\n+ // Selectively skip encodings based on percentile\n+ bool percentile_hit = false;\n#if !defined(ASTCENC_DECOMPRESS_ONLY)\n- // Flatten the block mode heuristic into some precomputed flags\n- if (percentile <= always_threshold)\n- {\n- always_block_mode_count++;\n- bsd.block_modes[packed_idx].percentile_hit = true;\n- bsd.decimation_modes[decimation_mode].percentile_hit = true;\n- }\n- else if (percentile <= mode_cutoff)\n+ if (j == 0)\n{\n- bsd.block_modes[packed_idx].percentile_hit = true;\n- bsd.decimation_modes[decimation_mode].percentile_hit = true;\n+ percentile_hit = 
percentiles[i] <= always_cutoff;\n}\nelse\n{\n- bsd.block_modes[packed_idx].percentile_hit = false;\n+ percentile_hit = percentiles[i] <= mode_cutoff;\n}\n#endif\n- bsd.block_modes[packed_idx].decimation_mode = static_cast<uint8_t>(decimation_mode);\n- bsd.block_modes[packed_idx].quant_mode = static_cast<uint8_t>(quant_mode);\n- bsd.block_modes[packed_idx].is_dual_plane = static_cast<uint8_t>(is_dual_plane);\n- bsd.block_modes[packed_idx].weight_bits = static_cast<uint8_t>(weight_bits);\n- bsd.block_modes[packed_idx].mode_index = static_cast<uint16_t>(i);\n- bsd.block_mode_packed_index[i] = static_cast<uint16_t>(packed_idx);\n- packed_idx++;\n+ if (j != 3 && !percentile_hit)\n+ {\n+ continue;\n}\n+\n+ // Allocate and initialize the decimation table entry if we've not used it yet\n+ int decimation_mode = decimation_mode_index[y_weights * 16 + x_weights];\n+ if (decimation_mode < 0)\n+ {\n+ construct_dt_entry_2d(x_texels, y_texels, x_weights, y_weights, bsd, *wb, packed_dm_idx);\n+ decimation_mode_index[y_weights * 16 + x_weights] = packed_dm_idx;\n+ decimation_mode = packed_dm_idx;\n+\n+ dm_counts[j]++;\n+ packed_dm_idx++;\n}\n- bsd.block_mode_count = packed_idx;\n- bsd.always_block_mode_count = always_block_mode_count;\n- bsd.always_decimation_mode_count = always_decimation_mode_count;\n+ auto& bm = bsd.block_modes[packed_bm_idx];\n+\n+ bm.decimation_mode = static_cast<uint8_t>(decimation_mode);\n+ bm.quant_mode = static_cast<uint8_t>(quant_mode);\n+ bm.is_dual_plane = static_cast<uint8_t>(is_dual_plane);\n+ bm.weight_bits = static_cast<uint8_t>(weight_bits);\n+ bm.mode_index = static_cast<uint16_t>(i);\n+\n+ bsd.block_mode_packed_index[i] = static_cast<uint16_t>(packed_bm_idx);\n+\n+ packed_bm_idx++;\n+ bm_counts[j]++;\n+ }\n+ }\n+\n+ bsd.block_mode_count_1plane_always = bm_counts[0];\n+ bsd.block_mode_count_1plane_selected = bm_counts[0] + bm_counts[1];\n+ bsd.block_mode_count_1plane_2plane_selected = bm_counts[0] + bm_counts[1] + bm_counts[2];\n+ bsd.block_mode_count_all = bm_counts[0] + bm_counts[1] + bm_counts[2] + bm_counts[3];\n+\n+ bsd.decimation_mode_count_always = dm_counts[0];\n+ bsd.decimation_mode_count_selected = dm_counts[0] + dm_counts[1] + dm_counts[2];\n+ bsd.decimation_mode_count_all = dm_counts[0] + dm_counts[1] + dm_counts[2] + dm_counts[3];\n#if !defined(ASTCENC_DECOMPRESS_ONLY)\n- assert(bsd.always_block_mode_count > 0);\n- assert(bsd.always_decimation_mode_count > 0);\n+ assert(bsd.block_mode_count_1plane_always > 0);\n+ assert(bsd.decimation_mode_count_always > 0);\ndelete[] percentiles;\n#endif\n// Ensure the end of the array contains valid data (should never get read)\n- for (unsigned int i = bsd.decimation_mode_count; i < WEIGHTS_MAX_DECIMATION_MODES; i++)\n+ for (unsigned int i = bsd.decimation_mode_count_all; i < WEIGHTS_MAX_DECIMATION_MODES; i++)\n{\nbsd.decimation_modes[i].maxprec_1plane = -1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\n- bsd.decimation_modes[i].percentile_hit = false;\n}\n// Determine the texels to use for kmeans clustering.\n@@ -1055,7 +1066,6 @@ static void construct_block_size_descriptor_3d(\nbsd.decimation_modes[decimation_mode_count].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\nbsd.decimation_modes[decimation_mode_count].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\n- bsd.decimation_modes[decimation_mode_count].percentile_hit = false;\ndecimation_mode_count++;\n}\n}\n@@ -1066,40 +1076,74 @@ static void construct_block_size_descriptor_3d(\n{\nbsd.decimation_modes[i].maxprec_1plane = 
-1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\n- bsd.decimation_modes[i].percentile_hit = false;\n}\n- bsd.decimation_mode_count = decimation_mode_count;\n+ bsd.decimation_mode_count_always = 0; // Skipped for 3D modes\n+ bsd.decimation_mode_count_selected = decimation_mode_count;\n+ bsd.decimation_mode_count_all = decimation_mode_count;\n// Construct the list of block formats\n+ // Construct the list of block formats referencing the decimation tables\n+\n+ // Clear the list to a known-bad value\n+ for (unsigned int i = 0; i < WEIGHTS_MAX_BLOCK_MODES; i++)\n+ {\n+ bsd.block_mode_packed_index[i] = BLOCK_BAD_BLOCK_MODE;\n+ }\n+\nunsigned int packed_idx = 0;\n+ unsigned int bm_counts[2] { 0 };\n+\n+ // Iterate two times to build a usefully ordered list:\n+ // - Pass 0 - keep valid single plane block modes\n+ // - Pass 1 - keep valid dual plane block modes\n+ for (unsigned int j = 0; j < 2; j++)\n+ {\nfor (unsigned int i = 0; i < WEIGHTS_MAX_BLOCK_MODES; i++)\n{\n+ // Skip modes we've already included in a previous pass\n+ if (bsd.block_mode_packed_index[i] != BLOCK_BAD_BLOCK_MODE)\n+ {\n+ continue;\n+ }\n+\nunsigned int x_weights;\nunsigned int y_weights;\nunsigned int z_weights;\nbool is_dual_plane;\nunsigned int quant_mode;\nunsigned int weight_bits;\n- bool permit_encode = true;\n- if (decode_block_mode_3d(i, x_weights, y_weights, z_weights, is_dual_plane, quant_mode, weight_bits))\n+ bool valid = decode_block_mode_3d(i, x_weights, y_weights, z_weights, is_dual_plane, quant_mode, weight_bits);\n+ // Skip invalid encodings\n+ if (!valid || x_weights > x_texels || y_weights > y_texels || z_weights > z_texels)\n{\n- if (x_weights > x_texels || y_weights > y_texels || z_weights > z_texels)\n+ continue;\n+ }\n+\n+ // Skip encodings in the wrong iteration\n+ if ((j == 0 && is_dual_plane) || (j == 1 && !is_dual_plane))\n{\n- permit_encode = false;\n+ continue;\n+ }\n+\n+ // Always skip encodings we can't physically encode based on bit availability\n+ if (is_dual_plane)\n+ {\n+ // This is the only check we need as only support 1 partition\n+ if ((109 - weight_bits) <= 0)\n+ {\n+ continue;\n}\n}\nelse\n{\n- permit_encode = false;\n- }\n-\n- if (!permit_encode)\n+ // This is conservative - fewer bits may be available for > 1 partition\n+ if ((111 - weight_bits) <= 0)\n{\n- bsd.block_mode_packed_index[i] = BLOCK_BAD_BLOCK_MODE;\ncontinue;\n}\n+ }\nint decimation_mode = decimation_mode_index[z_weights * 64 + y_weights * 8 + x_weights];\nbsd.block_modes[packed_idx].decimation_mode = static_cast<uint8_t>(decimation_mode);\n@@ -1108,20 +1152,17 @@ static void construct_block_size_descriptor_3d(\nbsd.block_modes[packed_idx].is_dual_plane = static_cast<uint8_t>(is_dual_plane);\nbsd.block_modes[packed_idx].mode_index = static_cast<uint16_t>(i);\n- // No percentile table, so enable everything all the time ...\n- bsd.block_modes[packed_idx].percentile_hit = true;\n- bsd.decimation_modes[decimation_mode].percentile_hit = true;\n-\nbsd.block_mode_packed_index[i] = static_cast<uint16_t>(packed_idx);\n-\n+ bm_counts[j]++;\npacked_idx++;\n}\n+ }\n- bsd.block_mode_count = packed_idx;\n-\n- // These are never used = the MODE0 fast path is skipped for 3D blocks\n- bsd.always_block_mode_count = 0;\n- bsd.always_decimation_mode_count = 0;\n+ // TODO: Probably need to do the 1/2 split for 3d modes too ...\n+ bsd.block_mode_count_1plane_always = 0; // Skipped for 3D modes\n+ bsd.block_mode_count_1plane_selected = bm_counts[0];\n+ bsd.block_mode_count_1plane_2plane_selected = bm_counts[0] + bm_counts[1];\n+ 
bsd.block_mode_count_all = bm_counts[0] + bm_counts[1];\n// Determine the texels to use for kmeans clustering.\nassign_kmeans_texels(bsd);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -384,7 +384,6 @@ static float compress_symbolic_block_for_partition_1plane(\npromise(partition_count > 0);\npromise(config.tune_candidate_limit > 0);\npromise(config.tune_refinement_limit > 0);\n- promise(bsd.decimation_mode_count > 0);\nauto compute_difference = &compute_symbolic_block_difference_1plane;\nif ((partition_count == 1) && !(config.flags & ASTCENC_FLG_MAP_RGBM))\n@@ -405,13 +404,13 @@ static float compress_symbolic_block_for_partition_1plane(\nuint8_t *dec_weights_quant_pvalue = tmpbuf.dec_weights_quant_pvalue;\n// For each decimation mode, compute an ideal set of weights with no quantization\n- unsigned int max_decimation_modes = only_always ? bsd.always_decimation_mode_count\n- : bsd.decimation_mode_count;\n+ unsigned int max_decimation_modes = only_always ? bsd.decimation_mode_count_always\n+ : bsd.decimation_mode_count_selected;\npromise(max_decimation_modes > 0);\nfor (unsigned int i = 0; i < max_decimation_modes; i++)\n{\nconst auto& dm = bsd.get_decimation_mode(i);\n- if (dm.maxprec_1plane < 0 || !dm.percentile_hit)\n+ if (dm.maxprec_1plane < 0)\n{\ncontinue;\n}\n@@ -460,14 +459,15 @@ static float compress_symbolic_block_for_partition_1plane(\n115 - 4, 111 - 4 - PARTITION_INDEX_BITS, 108 - 4 - PARTITION_INDEX_BITS, 105 - 4 - PARTITION_INDEX_BITS\n};\n- unsigned int max_block_modes = only_always ? bsd.always_block_mode_count\n- : bsd.block_mode_count;\n+ unsigned int max_block_modes = only_always ? bsd.block_mode_count_1plane_always\n+ : bsd.block_mode_count_1plane_selected;\npromise(max_block_modes > 0);\nfor (unsigned int i = 0; i < max_block_modes; ++i)\n{\nconst block_mode& bm = bsd.block_modes[i];\n+ assert(!bm.is_dual_plane);\nint bitcount = free_bits_for_partition_count[partition_count - 1] - bm.weight_bits;\n- if (bm.is_dual_plane || !bm.percentile_hit || bitcount <= 0)\n+ if (bitcount <= 0)\n{\nqwt_errors[i] = 1e38f;\ncontinue;\n@@ -508,7 +508,7 @@ static float compress_symbolic_block_for_partition_1plane(\nunsigned int candidate_count = compute_ideal_endpoint_formats(\npi, blk, ei.ep, qwt_bitcounts, qwt_errors,\n- config.tune_candidate_limit, max_block_modes,\n+ config.tune_candidate_limit, 0, max_block_modes,\npartition_format_specifiers, block_mode_index,\ncolor_quant_level, color_quant_level_mod, tmpbuf);\n@@ -521,7 +521,7 @@ static float compress_symbolic_block_for_partition_1plane(\nTRACE_NODE(node0, \"candidate\");\nconst int bm_packed_index = block_mode_index[i];\n- assert(bm_packed_index >= 0 && bm_packed_index < (int)bsd.block_mode_count);\n+ assert(bm_packed_index >= 0 && bm_packed_index < (int)bsd.block_mode_count_1plane_selected);\nconst block_mode& qw_bm = bsd.block_modes[bm_packed_index];\nint decimation_mode = qw_bm.decimation_mode;\n@@ -741,7 +741,7 @@ static float compress_symbolic_block_for_partition_2planes(\n) {\npromise(config.tune_candidate_limit > 0);\npromise(config.tune_refinement_limit > 0);\n- promise(bsd.decimation_mode_count > 0);\n+ promise(bsd.decimation_mode_count_selected > 0);\n// Compute ideal weights and endpoint colors, with no quantization or decimation\nendpoints_and_weights& ei1 = tmpbuf.ei1;\n@@ -756,10 +756,11 @@ static float compress_symbolic_block_for_partition_2planes(\nuint8_t *dec_weights_quant_pvalue = 
tmpbuf.dec_weights_quant_pvalue;\n// For each decimation mode, compute an ideal set of weights with no quantization\n- for (unsigned int i = 0; i < bsd.decimation_mode_count; i++)\n+ // TODO: Try to split this list into separate 1 and 2 plane lists?\n+ for (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst auto& dm = bsd.get_decimation_mode(i);\n- if (dm.maxprec_2planes < 0 || !dm.percentile_hit)\n+ if (dm.maxprec_2planes < 0)\n{\ncontinue;\n}\n@@ -821,17 +822,15 @@ static float compress_symbolic_block_for_partition_2planes(\nint* qwt_bitcounts = tmpbuf.qwt_bitcounts;\nfloat* qwt_errors = tmpbuf.qwt_errors;\n- for (unsigned int i = 0; i < bsd.block_mode_count; ++i)\n+ unsigned int start_2plane = bsd.block_mode_count_1plane_selected;\n+ unsigned int end_2plane = bsd.block_mode_count_1plane_2plane_selected;\n+\n+ for (unsigned int i = start_2plane; i < end_2plane; i++)\n{\nconst block_mode& bm = bsd.block_modes[i];\n- int bitcount = 109 - bm.weight_bits;\n- if (!bm.is_dual_plane || !bm.percentile_hit || bitcount <= 0)\n- {\n- qwt_errors[i] = 1e38f;\n- continue;\n- }\n+ assert(bm.is_dual_plane);\n- qwt_bitcounts[i] = bitcount;\n+ qwt_bitcounts[i] = 109 - bm.weight_bits;\nif (weight_high_value1[i] > 1.02f * min_wt_cutoff1)\n{\n@@ -887,7 +886,8 @@ static float compress_symbolic_block_for_partition_2planes(\nconst auto& pi = bsd.get_partition_info(1, 0);\nunsigned int candidate_count = compute_ideal_endpoint_formats(\npi, blk, epm, qwt_bitcounts, qwt_errors,\n- config.tune_candidate_limit, bsd.block_mode_count,\n+ config.tune_candidate_limit,\n+ bsd.block_mode_count_1plane_selected, bsd.block_mode_count_1plane_2plane_selected,\npartition_format_specifiers, block_mode_index,\ncolor_quant_level, color_quant_level_mod, tmpbuf);\n@@ -900,7 +900,7 @@ static float compress_symbolic_block_for_partition_2planes(\nTRACE_NODE(node0, \"candidate\");\nconst int bm_packed_index = block_mode_index[i];\n- assert(bm_packed_index >= 0 && bm_packed_index < (int)bsd.block_mode_count);\n+ assert(bm_packed_index >= (int)bsd.block_mode_count_1plane_selected && bm_packed_index < (int)bsd.block_mode_count_1plane_2plane_selected);\nconst block_mode& qw_bm = bsd.block_modes[bm_packed_index];\nint decimation_mode = qw_bm.decimation_mode;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -615,9 +615,6 @@ struct block_mode\n/** @brief Is a dual weight plane used by this block mode? */\nuint8_t is_dual_plane : 1;\n- /** @brief Is this mode enabled in the current search preset? */\n- uint8_t percentile_hit : 1;\n-\n/**\n* @brief Get the weight quantization used by this block mode.\n*\n@@ -639,9 +636,6 @@ struct decimation_mode\n/** @brief The max weight precision for 2 planes, or -1 if not supported. */\nint8_t maxprec_2planes;\n-\n- /** @brief Is this mode enabled in the current search preset? */\n- uint8_t percentile_hit;\n};\n/**\n@@ -677,28 +671,37 @@ struct block_size_descriptor\n/** @brief The block total texel count. */\nuint8_t texel_count;\n- /** @brief The number of stored decimation modes. */\n- unsigned int decimation_mode_count;\n-\n/**\n* @brief The number of stored decimation modes which are \"always\" modes.\n*\n* Always modes are stored at the start of the decimation_modes list.\n*/\n- unsigned int always_decimation_mode_count;\n+ unsigned int decimation_mode_count_always;\n- /** @brief The number of stored block modes. 
*/\n- unsigned int block_mode_count;\n+ /** @brief The number of stored decimation modes for selected encodings. */\n+ unsigned int decimation_mode_count_selected;\n- /** @brief The number of active partitionings for 1/2/3/4 partitionings. */\n- unsigned int partitioning_count[BLOCK_MAX_PARTITIONS];\n+ /** @brief The number of stored decimation modes for any encoding. */\n+ unsigned int decimation_mode_count_all;\n/**\n* @brief The number of stored block modes which are \"always\" modes.\n*\n* Always modes are stored at the start of the block_modes list.\n*/\n- unsigned int always_block_mode_count;\n+ unsigned int block_mode_count_1plane_always;\n+\n+ /** @brief The number of stored block modes for active 1 plane encodings. */\n+ unsigned int block_mode_count_1plane_selected;\n+\n+ /** @brief The number of stored block modes for active 1 and 2 plane encodings. */\n+ unsigned int block_mode_count_1plane_2plane_selected;\n+\n+ /** @brief The number of stored block modes for any encoding. */\n+ unsigned int block_mode_count_all;\n+\n+ /** @brief The number of active partitionings for 1/2/3/4 partitionings. */\n+ unsigned int partitioning_count[BLOCK_MAX_PARTITIONS];\n/** @brief The active decimation modes, stored in low indices. */\ndecimation_mode decimation_modes[WEIGHTS_MAX_DECIMATION_MODES];\n@@ -781,7 +784,7 @@ struct block_size_descriptor\nconst block_mode& get_block_mode(unsigned int block_mode) const\n{\nunsigned int packed_index = this->block_mode_packed_index[block_mode];\n- assert(packed_index != BLOCK_BAD_BLOCK_MODE && packed_index < this->block_mode_count);\n+ assert(packed_index != BLOCK_BAD_BLOCK_MODE && packed_index < this->block_mode_count_all);\nreturn this->block_modes[packed_index];\n}\n@@ -2116,7 +2119,8 @@ void unpack_weights(\n* @param qwt_bitcounts Bit counts for different quantization methods.\n* @param qwt_errors Errors for different quantization methods.\n* @param tune_candidate_limit The max number of candidates to return, may be less.\n- * @param block_mode_count The number of blocks modes candidates to inspect.\n+ * @param start_block_mode The first block mode to inspect.\n+ * @param end_block_mode The last block mode to inspect.\n* @param[out] partition_format_specifiers The best formats per partition.\n* @param[out] block_mode The best packed block mode indexes.\n* @param[out] quant_level The best color quant level.\n@@ -2132,7 +2136,8 @@ unsigned int compute_ideal_endpoint_formats(\nconst int* qwt_bitcounts,\nconst float* qwt_errors,\nunsigned int tune_candidate_limit,\n- unsigned int block_mode_count,\n+ unsigned int start_block_mode,\n+ unsigned int end_block_mode,\nint partition_format_specifiers[TUNE_MAX_TRIAL_CANDIDATES][BLOCK_MAX_PARTITIONS],\nint block_mode[TUNE_MAX_TRIAL_CANDIDATES],\nquant_method quant_level[TUNE_MAX_TRIAL_CANDIDATES],\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -1115,7 +1115,8 @@ unsigned int compute_ideal_endpoint_formats(\nconst int* qwt_bitcounts,\nconst float* qwt_errors,\nunsigned int tune_candidate_limit,\n- unsigned int block_mode_count,\n+ unsigned int start_block_mode,\n+ unsigned int end_block_mode,\n// output data\nint partition_format_specifiers[TUNE_MAX_TRIAL_CANDIDATES][BLOCK_MAX_PARTITIONS],\nint block_mode[TUNE_MAX_TRIAL_CANDIDATES],\n@@ -1126,7 +1127,6 @@ unsigned int compute_ideal_endpoint_formats(\nint partition_count = pi.partition_count;\npromise(partition_count > 0);\n- 
promise(block_mode_count > 0);\nint encode_hdr_rgb = blk.rgb_lns[0];\nint encode_hdr_alpha = blk.alpha_lns[0];\n@@ -1153,8 +1153,17 @@ unsigned int compute_ideal_endpoint_formats(\n// Ensure that the \"overstep\" of the last iteration in the vectorized loop will contain data\n// that will never be picked as best candidate\n- const int packed_mode_count_simd_up = round_up_to_simd_multiple_vla(block_mode_count);\n- for (int i = block_mode_count; i < packed_mode_count_simd_up; i++)\n+ const unsigned int packed_end_block_mode = round_up_to_simd_multiple_vla(end_block_mode);\n+\n+ // TODO: Can we avoid this?\n+ for (unsigned int i = 0; i < start_block_mode; i++)\n+ {\n+ errors_of_best_combination[i] = ERROR_CALC_DEFAULT;\n+ best_quant_levels[i] = QUANT_2;\n+ best_quant_levels_mod[i] = QUANT_2;\n+ }\n+\n+ for (unsigned int i = end_block_mode; i < packed_end_block_mode; i++)\n{\nerrors_of_best_combination[i] = ERROR_CALC_DEFAULT;\nbest_quant_levels[i] = QUANT_2;\n@@ -1168,7 +1177,7 @@ unsigned int compute_ideal_endpoint_formats(\n// The block contains 1 partition\nif (partition_count == 1)\n{\n- for (unsigned int i = 0; i < block_mode_count; ++i)\n+ for (unsigned int i = start_block_mode; i < end_block_mode; ++i)\n{\nif (qwt_errors[i] >= ERROR_CALC_DEFAULT)\n{\n@@ -1200,7 +1209,8 @@ unsigned int compute_ideal_endpoint_formats(\ntwo_partitions_find_best_combination_for_every_quantization_and_integer_count(\nbest_error, format_of_choice, combined_best_error, formats_of_choice);\n- for (unsigned int i = 0; i < block_mode_count; ++i)\n+ assert(start_block_mode == 0);\n+ for (unsigned int i = 0; i < end_block_mode; ++i)\n{\nif (qwt_errors[i] >= ERROR_CALC_DEFAULT)\n{\n@@ -1232,7 +1242,8 @@ unsigned int compute_ideal_endpoint_formats(\nthree_partitions_find_best_combination_for_every_quantization_and_integer_count(\nbest_error, format_of_choice, combined_best_error, formats_of_choice);\n- for (unsigned int i = 0; i < block_mode_count; ++i)\n+ assert(start_block_mode == 0);\n+ for (unsigned int i = 0; i < end_block_mode; ++i)\n{\nif (qwt_errors[i] >= ERROR_CALC_DEFAULT)\n{\n@@ -1265,7 +1276,8 @@ unsigned int compute_ideal_endpoint_formats(\nfour_partitions_find_best_combination_for_every_quantization_and_integer_count(\nbest_error, format_of_choice, combined_best_error, formats_of_choice);\n- for (unsigned int i = 0; i < block_mode_count; ++i)\n+ assert(start_block_mode == 0);\n+ for (unsigned int i = 0; i < end_block_mode; ++i)\n{\nif (qwt_errors[i] >= ERROR_CALC_DEFAULT)\n{\n@@ -1303,8 +1315,10 @@ unsigned int compute_ideal_endpoint_formats(\n{\nvint vbest_error_index(-1);\nvfloat vbest_ep_error(ERROR_CALC_DEFAULT);\n- vint lane_ids = vint::lane_id();\n- for (unsigned int j = 0; j < block_mode_count; j += ASTCENC_SIMD_WIDTH)\n+\n+ start_block_mode = round_down_to_simd_multiple_vla(start_block_mode);\n+ vint lane_ids = vint::lane_id() + vint(start_block_mode);\n+ for (unsigned int j = start_block_mode; j < end_block_mode; j += ASTCENC_SIMD_WIDTH)\n{\nvfloat err = vfloat(&errors_of_best_combination[j]);\nvmask mask1 = err < vbest_ep_error;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -510,13 +510,13 @@ void compute_angular_endpoints_1plane(\nfloat (&low_values)[WEIGHTS_MAX_DECIMATION_MODES][12] = tmpbuf.weight_low_values1;\nfloat (&high_values)[WEIGHTS_MAX_DECIMATION_MODES][12] = tmpbuf.weight_high_values1;\n- unsigned int max_decimation_modes = only_always ? 
bsd.always_decimation_mode_count\n- : bsd.decimation_mode_count;\n+ unsigned int max_decimation_modes = only_always ? bsd.decimation_mode_count_always\n+ : bsd.decimation_mode_count_selected;\npromise(max_decimation_modes > 0);\nfor (unsigned int i = 0; i < max_decimation_modes; i++)\n{\nconst decimation_mode& dm = bsd.decimation_modes[i];\n- if (dm.maxprec_1plane < 0 || !dm.percentile_hit)\n+ if (dm.maxprec_1plane < 0)\n{\ncontinue;\n}\n@@ -539,16 +539,13 @@ void compute_angular_endpoints_1plane(\n}\n}\n- unsigned int max_block_modes = only_always ? bsd.always_block_mode_count\n- : bsd.block_mode_count;\n+ unsigned int max_block_modes = only_always ? bsd.block_mode_count_1plane_always\n+ : bsd.block_mode_count_1plane_selected;\npromise(max_block_modes > 0);\nfor (unsigned int i = 0; i < max_block_modes; ++i)\n{\nconst block_mode& bm = bsd.block_modes[i];\n- if (bm.is_dual_plane || !bm.percentile_hit)\n- {\n- continue;\n- }\n+ assert(!bm.is_dual_plane);\nunsigned int quant_mode = bm.quant_mode;\nunsigned int decim_mode = bm.decimation_mode;\n@@ -575,11 +572,12 @@ void compute_angular_endpoints_2planes(\nfloat (&low_values2)[WEIGHTS_MAX_DECIMATION_MODES][12] = tmpbuf.weight_low_values2;\nfloat (&high_values2)[WEIGHTS_MAX_DECIMATION_MODES][12] = tmpbuf.weight_high_values2;\n- promise(bsd.decimation_mode_count > 0);\n- for (unsigned int i = 0; i < bsd.decimation_mode_count; i++)\n+ promise(bsd.decimation_mode_count_selected > 0);\n+ // TODO: Split the list into two parts for this one?\n+ for (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst decimation_mode& dm = bsd.decimation_modes[i];\n- if (dm.maxprec_2planes < 0 || !dm.percentile_hit)\n+ if (dm.maxprec_2planes < 0)\n{\ncontinue;\n}\n@@ -612,11 +610,12 @@ void compute_angular_endpoints_2planes(\n}\n}\n- promise(bsd.block_mode_count > 0);\n- for (unsigned int i = 0; i < bsd.block_mode_count; ++i)\n+ // TODO: Skip start of list!\n+ promise(bsd.block_mode_count_1plane_2plane_selected > 0);\n+ for (unsigned int i = 0; i < bsd.block_mode_count_1plane_2plane_selected; ++i)\n{\nconst block_mode& bm = bsd.block_modes[i];\n- if (!bm.is_dual_plane || !bm.percentile_hit)\n+ if (!bm.is_dual_plane)\n{\ncontinue;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Sort block_mode storage into bands
61,745
03.04.2022 16:00:45
-3,600
cf1a817aa7daf0907000396f8a87aa950f042299
Sort partition_info storage into bands
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -360,7 +360,7 @@ static void count_partition_mismatch_bits(\nconst uint64_t bitmaps[BLOCK_MAX_PARTITIONS],\nunsigned int mismatch_counts[BLOCK_MAX_PARTITIONINGS]\n) {\n- unsigned int active_count = bsd.partitioning_count[partition_count - 1];\n+ unsigned int active_count = bsd.partitioning_count_selected[partition_count - 1];\nif (partition_count == 2)\n{\n@@ -474,7 +474,7 @@ static void compute_kmeans_partition_ordering(\ncount_partition_mismatch_bits(bsd, partition_count, bitmaps, mismatch_counts);\n// Sort the partitions based on the number of mismatched bits\n- get_partition_ordering_by_mismatch_bits(bsd.partitioning_count[partition_count - 1],\n+ get_partition_ordering_by_mismatch_bits(bsd.partitioning_count_selected[partition_count - 1],\nmismatch_counts, partition_ordering);\n}\n@@ -511,7 +511,7 @@ void find_best_partition_candidates(\nunsigned int partition_sequence[BLOCK_MAX_PARTITIONINGS];\ncompute_kmeans_partition_ordering(bsd, blk, partition_count, partition_sequence);\npartition_search_limit = astc::min(partition_search_limit,\n- bsd.partitioning_count[partition_count - 1]);\n+ bsd.partitioning_count_selected[partition_count - 1]);\nbool uses_alpha = !blk.is_constant_channel(3);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -700,8 +700,11 @@ struct block_size_descriptor\n/** @brief The number of stored block modes for any encoding. */\nunsigned int block_mode_count_all;\n- /** @brief The number of active partitionings for 1/2/3/4 partitionings. */\n- unsigned int partitioning_count[BLOCK_MAX_PARTITIONS];\n+ /** @brief The number of selected partitionings for 1/2/3/4 partitionings. */\n+ unsigned int partitioning_count_selected[BLOCK_MAX_PARTITIONS];\n+\n+ /** @brief The number of partitionings for 1/2/3/4 partitionings. */\n+ unsigned int partitioning_count_all[BLOCK_MAX_PARTITIONS];\n/** @brief The active decimation modes, stored in low indices. */\ndecimation_mode decimation_modes[WEIGHTS_MAX_DECIMATION_MODES];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -381,45 +381,71 @@ static void build_partition_table_for_one_partition_count(\n};\nunsigned int next_index = 0;\n- bsd.partitioning_count[partition_count - 1] = 0;\n+ bsd.partitioning_count_selected[partition_count - 1] = 0;\n+ bsd.partitioning_count_all[partition_count - 1] = 0;\n+ // Skip tables larger than config max partition count if we can omit modes\nif (can_omit_partitionings && (partition_count > partition_count_cutoff))\n{\nreturn;\n}\n+ // Iterate through twice\n+ // - Pass 0: Keep selected partitionings\n+ // - Pass 1: Keep non-selected partitionings (skip if in omit mode)\n+ unsigned int max_iter = can_omit_partitionings ? 
1 : 2;\n+\n+ // Tracker for things we built in the first iteration\n+ uint8_t build[BLOCK_MAX_PARTITIONINGS] { 0 };\n+ for (unsigned int x = 0; x < max_iter; x++)\n+ {\nfor (unsigned int i = 0; i < BLOCK_MAX_PARTITIONINGS; i++)\n{\n- bool keep = generate_one_partition_info_entry(bsd, partition_count, i, next_index, ptab[next_index]);\n- if (can_omit_partitionings && !keep)\n+ // Don't include things we built in the first pass\n+ if ((x == 1) && build[i])\n+ {\n+ continue;\n+ }\n+\n+ bool keep_useful = generate_one_partition_info_entry(bsd, partition_count, i, next_index, ptab[next_index]);\n+ if ((x == 0) && !keep_useful)\n{\n- bsd.partitioning_packed_index[partition_count - 2][i] = BLOCK_BAD_PARTITIONING;\ncontinue;\n}\ngenerate_canonical_partitioning(bsd.texel_count, ptab[next_index].partition_of_texel, canonical_patterns + next_index * 7);\n- keep = true;\n+ bool keep_canonical = true;\nfor (unsigned int j = 0; j < next_index; j++)\n{\nbool match = compare_canonical_partitionings(canonical_patterns + 7 * next_index, canonical_patterns + 7 * j);\nif (match)\n{\n- ptab[next_index].partition_count = 0;\n- partitioning_valid[partition_count - 2][next_index] = 255;\n- keep = !can_omit_partitionings;\n+ keep_canonical = false;;\nbreak;\n}\n}\n- if (keep)\n+ if (keep_useful && keep_canonical)\n+ {\n+ if (x == 0)\n{\nbsd.partitioning_packed_index[partition_count - 2][i] = next_index;\n- bsd.partitioning_count[partition_count - 1] = next_index + 1;\n+ bsd.partitioning_count_selected[partition_count - 1]++;\n+ bsd.partitioning_count_all[partition_count - 1]++;\n+ build[i] = 1;\nnext_index++;\n}\n+ }\nelse\n{\n- bsd.partitioning_packed_index[partition_count - 2][i] = BLOCK_BAD_PARTITIONING;\n+ if (x == 1)\n+ {\n+ bsd.partitioning_packed_index[partition_count - 2][i] = next_index;\n+ bsd.partitioning_count_all[partition_count - 1]++;\n+ partitioning_valid[partition_count - 2][next_index] = 255;\n+ next_index++;\n+ }\n+ }\n}\n}\n}\n@@ -436,7 +462,8 @@ void init_partition_tables(\npartition_info* par_tab1 = par_tab4 + BLOCK_MAX_PARTITIONINGS;\ngenerate_one_partition_info_entry(bsd, 1, 0, 0, *par_tab1);\n- bsd.partitioning_count[0] = 1;\n+ bsd.partitioning_count_selected[0] = 1;\n+ bsd.partitioning_count_all[0] = 1;\nuint64_t* canonical_patterns = new uint64_t[BLOCK_MAX_PARTITIONINGS * 7];\nbuild_partition_table_for_one_partition_count(bsd, can_omit_partitionings, partition_count_cutoff, 2, par_tab2, canonical_patterns);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Sort partition_info storage into bands
61,745
03.04.2022 17:52:51
-3,600
f387f9620ae1846322d82d2a80b60b554398d548
Fix debug build asserts
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -856,7 +856,7 @@ struct block_size_descriptor\npacked_index = this->partitioning_packed_index[partition_count - 2][index];\n}\n- assert(packed_index != BLOCK_BAD_PARTITIONING && packed_index < this->partitioning_count[partition_count - 1]);\n+ assert(packed_index != BLOCK_BAD_PARTITIONING && packed_index < this->partitioning_count_all[partition_count - 1]);\nauto& result = get_partition_table(partition_count)[packed_index];\nassert(index == result.partition_index);\nreturn result;\n@@ -872,7 +872,7 @@ struct block_size_descriptor\n*/\nconst partition_info& get_raw_partition_info(unsigned int partition_count, unsigned int packed_index) const\n{\n- assert(packed_index != BLOCK_BAD_PARTITIONING && packed_index < this->partitioning_count[partition_count - 1]);\n+ assert(packed_index != BLOCK_BAD_PARTITIONING && packed_index < this->partitioning_count_all[partition_count - 1]);\nauto& result = get_partition_table(partition_count)[packed_index];\nreturn result;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix debug build asserts
61,745
03.04.2022 17:56:31
-3,600
28a1473299f2443e06816c3f04be082bd629d69e
Make construct_dt_entry_2d static
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -761,7 +761,7 @@ static void assign_kmeans_texels(\n* @param wb The decimation table init scratch working buffers.\n* @param index The packed array index to populate.\n*/\n-void construct_dt_entry_2d(\n+static void construct_dt_entry_2d(\nunsigned int x_texels,\nunsigned int y_texels,\nunsigned int x_weights,\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -420,7 +420,7 @@ static void build_partition_table_for_one_partition_count(\nbool match = compare_canonical_partitionings(canonical_patterns + 7 * next_index, canonical_patterns + 7 * j);\nif (match)\n{\n- keep_canonical = false;;\n+ keep_canonical = false;\nbreak;\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Make construct_dt_entry_2d static
61,745
04.04.2022 09:20:55
-3,600
71683389dbe74812309f46066375459abcbceb53
Update FLG_SELF_DECOMPRESS_ONLY documentation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc.h", "new_path": "Source/astcenc.h", "diff": "*\n* A normal context is capable of decompressing any ASTC texture, including those generated by other\n* compressors with unknown heuristics. This is the most flexible implementation, but forces the\n- * main data tables used by the codec to include entries that are not needed during compressor. This\n- * can slow down compression by ~15%. To optimize this use case the context can be created with the\n- * ASTCENC_FLG_SELF_DECOMPRESS_ONLY flag. This tells the compressor that it will only be asked to\n- * decompress images that it compressed, allowing the size of the context structures to be\n- * substantially reduced with a corresponding boost in performance.\n- *\n- * Attempting to decompress an valid image which was created by another compressor, or even another\n- * astcenc compressor configuration, may result in blocks returning as solid magenta or NaN values\n- * if they use unsupported encodings for that configuration.\n+ * data tables used by the codec to include entries that are not needed during compression. This\n+ * can slow down context creation by a significant amount, especially for the faster compression\n+ * modes where few data table entries are actually used. To optimize this use case the context can\n+ * be created with the ASTCENC_FLG_SELF_DECOMPRESS_ONLY flag. This tells the compressor that it will\n+ * only be asked to decompress images that it compressed itself, allowing the data tables to\n+ * exclude entries that are not needed by the current compression configuration. This reduces the\n+ * size of the context data tables in memory and improves context creation performance. Note that,\n+ * as of the 3.6 release, this flag no longer affects compression performance.\n+ *\n+ * Using this flag while attempting to decompress an valid image which was created by another\n+ * compressor, or even another astcenc compressor version or configuration, may result in blocks\n+ * returning as solid magenta or NaN value error blocks.\n*/\n#ifndef ASTCENC_INCLUDED\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenccli_toplevel.cpp", "new_path": "Source/astcenccli_toplevel.cpp", "diff": "@@ -612,7 +612,7 @@ static int init_astcenc_config(\n{\nprintf(\"ERROR: Required SIMD ISA support missing on this CPU\\n\");\nreturn 1;\n- }\n+\nelse if (status == ASTCENC_ERR_BAD_CPU_FLOAT)\n{\nprintf(\"ERROR: astcenc must not be compiled with -ffast-math\\n\");\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Update FLG_SELF_DECOMPRESS_ONLY documentation
61,745
04.04.2022 09:26:28
-3,600
b615cf6ffb634cce4e200c83de24643fe64d6b32
Add 3.6 entry to change log
[ { "change_type": "MODIFY", "old_path": "Docs/ChangeLog-3x.md", "new_path": "Docs/ChangeLog-3x.md", "diff": "@@ -7,7 +7,33 @@ All performance data on this page is measured on an Intel Core i5-9600K\nclocked at 4.2 GHz, running `astcenc` using AVX2 and 6 threads.\n<!-- ---------------------------------------------------------------------- -->\n-## 3.4\n+## 3.6\n+\n+**Status:** In development\n+\n+There are no planned major improvements for the 3.6 release. We will release it\n+later in the year with a round up of any small improvements made since the 3.5\n+release was made.\n+\n+* **General:**\n+ * **Feature:** Data tables are now optimized for contexts without the\n+ `SELF_DECOMPRESS_ONLY` flag set. The flag therefore no longer improves\n+ compression performance, but still reduces context creation time and\n+ context data table memory footprint.\n+\n+### Performance:\n+\n+Key for charts:\n+\n+* Color = block size (see legend).\n+* Letter = image format (N = normal map, G = grayscale, L = LDR, H = HDR).\n+\n+**Relative performance vs 3.5 release:**\n+\n+TBD ...\n+\n+<!-- ---------------------------------------------------------------------- -->\n+## 3.5\n**Status:** March 2022\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add 3.6 entry to change log
61,745
07.04.2022 08:45:25
-3,600
54eddabfa46e3781e0b5f562bc89710a016abaef
Fix incorrect weight variable name
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -2212,14 +2212,14 @@ void prepare_angular_tables();\n* @param tune_low_weight_limit Weight count cutoff below which we use simpler searches.\n* @param only_always Only consider block modes that are always enabled.\n* @param bsd The block size descriptor for the current trial.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight values.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param[out] tmpbuf Preallocated scratch buffers for the compressor.\n*/\nvoid compute_angular_endpoints_1plane(\nunsigned int tune_low_weight_limit,\nbool only_always,\nconst block_size_descriptor& bsd,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\ncompression_working_buffers& tmpbuf);\n/**\n@@ -2227,13 +2227,13 @@ void compute_angular_endpoints_1plane(\n*\n* @param tune_low_weight_limit Weight count cutoff below which we use simpler searches.\n* @param bsd The block size descriptor for the current trial.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight values.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param[out] tmpbuf Preallocated scratch buffers for the compressor.\n*/\nvoid compute_angular_endpoints_2planes(\nunsigned int tune_low_weight_limit,\nconst block_size_descriptor& bsd,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\ncompression_working_buffers& tmpbuf);\n/* ============================================================================\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -96,13 +96,13 @@ void prepare_angular_tables()\n* @brief Compute the angular alignment factors and offsets.\n*\n* @param weight_count The number of (decimated) weights.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight values.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param max_angular_steps The maximum number of steps to be tested.\n* @param[out] offsets The output angular offsets array.\n*/\nstatic void compute_angular_offsets(\nunsigned int weight_count,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\nunsigned int max_angular_steps,\nfloat* offsets\n) {\n@@ -115,7 +115,7 @@ static void compute_angular_offsets(\nfor (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n// Add 2^23 and interpreting bits extracts round-to-nearest int\n- vfloat sample = loada(dec_weight_quant_uvalue + i) * (SINCOS_STEPS - 1.0f) + vfloat(12582912.0f);\n+ vfloat sample = loada(dec_weight_ideal_value + i) * (SINCOS_STEPS - 1.0f) + vfloat(12582912.0f);\nvint isample = float_as_int(sample) & vint((SINCOS_STEPS - 1));\nstorea(isample, isamplev + i);\n}\n@@ -149,7 +149,7 @@ static void compute_angular_offsets(\n* forcing samples that should have had one weight value one step up or down.\n*\n* @param weight_count The number of (decimated) weights.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight values.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param max_angular_steps The maximum number of steps to be tested.\n* @param max_quant_steps The maximum quantization level to be tested.\n* @param offsets The angular offsets array.\n@@ -161,7 +161,7 @@ static void 
compute_angular_offsets(\n*/\nstatic void compute_lowest_and_highest_weight(\nunsigned int weight_count,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\nunsigned int max_angular_steps,\nunsigned int max_quant_steps,\nconst float* offsets,\n@@ -188,7 +188,7 @@ static void compute_lowest_and_highest_weight(\nfor (unsigned int j = 0; j < weight_count; ++j)\n{\n- vfloat sval = load1(&dec_weight_quant_uvalue[j]) * rcp_stepsize - offset;\n+ vfloat sval = load1(&dec_weight_ideal_value[j]) * rcp_stepsize - offset;\nvfloat svalrte = round(sval);\nvfloat diff = sval - svalrte;\nerrval += diff * diff;\n@@ -237,14 +237,14 @@ static void compute_lowest_and_highest_weight(\n* @brief The main function for the angular algorithm.\n*\n* @param weight_count The number of (decimated) weights.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight value.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param max_quant_level The maximum quantization level to be tested.\n* @param[out] low_value Per angular step, the lowest weight value.\n* @param[out] high_value Per angular step, the highest weight value.\n*/\nstatic void compute_angular_endpoints_for_quant_levels(\nunsigned int weight_count,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\nunsigned int max_quant_level,\nfloat low_value[12],\nfloat high_value[12]\n@@ -253,7 +253,7 @@ static void compute_angular_endpoints_for_quant_levels(\nalignas(ASTCENC_VECALIGN) float angular_offsets[ANGULAR_STEPS];\nunsigned int max_angular_steps = max_angular_steps_needed_for_quant_level[max_quant_level];\n- compute_angular_offsets(weight_count, dec_weight_quant_uvalue,\n+ compute_angular_offsets(weight_count, dec_weight_ideal_value,\nmax_angular_steps, angular_offsets);\nalignas(ASTCENC_VECALIGN) int32_t lowest_weight[ANGULAR_STEPS];\n@@ -262,7 +262,7 @@ static void compute_angular_endpoints_for_quant_levels(\nalignas(ASTCENC_VECALIGN) float cut_low_weight_error[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) float cut_high_weight_error[ANGULAR_STEPS];\n- compute_lowest_and_highest_weight(weight_count, dec_weight_quant_uvalue,\n+ compute_lowest_and_highest_weight(weight_count, dec_weight_ideal_value,\nmax_angular_steps, max_quant_steps,\nangular_offsets, lowest_weight, weight_span, error,\ncut_low_weight_error, cut_high_weight_error);\n@@ -416,14 +416,14 @@ static void compute_lowest_and_highest_weight_lwc(\n* @brief The main function for the angular algorithm, variant for low weight count.\n*\n* @param weight_count The number of (decimated) weights.\n- * @param dec_weight_quant_uvalue The decimated and quantized weight value.\n+ * @param dec_weight_ideal_value The ideal decimated unquantized weight values.\n* @param max_quant_level The maximum quantization level to be tested.\n* @param[out] low_value Per angular step, the lowest weight value.\n* @param[out] high_value Per angular step, the highest weight value.\n*/\nstatic void compute_angular_endpoints_for_quant_levels_lwc(\nunsigned int weight_count,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\nunsigned int max_quant_level,\nfloat low_value[12],\nfloat high_value[12]\n@@ -436,11 +436,11 @@ static void compute_angular_endpoints_for_quant_levels_lwc(\nalignas(ASTCENC_VECALIGN) int32_t weight_span[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) float error[ANGULAR_STEPS];\n- compute_angular_offsets(weight_count, dec_weight_quant_uvalue,\n+ compute_angular_offsets(weight_count, 
dec_weight_ideal_value,\nmax_angular_steps, angular_offsets);\n- compute_lowest_and_highest_weight_lwc(weight_count, dec_weight_quant_uvalue,\n+ compute_lowest_and_highest_weight_lwc(weight_count, dec_weight_ideal_value,\nmax_angular_steps, max_quant_steps,\nangular_offsets, lowest_weight, weight_span, error);\n@@ -501,7 +501,7 @@ void compute_angular_endpoints_1plane(\nunsigned int tune_low_weight_limit,\nbool only_always,\nconst block_size_descriptor& bsd,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\ncompression_working_buffers& tmpbuf\n) {\nfloat (&low_value)[WEIGHTS_MAX_BLOCK_MODES] = tmpbuf.weight_low_value1;\n@@ -527,14 +527,14 @@ void compute_angular_endpoints_1plane(\n{\ncompute_angular_endpoints_for_quant_levels_lwc(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS,\ndm.maxprec_1plane, low_values[i], high_values[i]);\n}\nelse\n{\ncompute_angular_endpoints_for_quant_levels(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS,\ndm.maxprec_1plane, low_values[i], high_values[i]);\n}\n}\n@@ -559,7 +559,7 @@ void compute_angular_endpoints_1plane(\nvoid compute_angular_endpoints_2planes(\nunsigned int tune_low_weight_limit,\nconst block_size_descriptor& bsd,\n- const float* dec_weight_quant_uvalue,\n+ const float* dec_weight_ideal_value,\ncompression_working_buffers& tmpbuf\n) {\nfloat (&low_value1)[WEIGHTS_MAX_BLOCK_MODES] = tmpbuf.weight_low_value1;\n@@ -588,24 +588,24 @@ void compute_angular_endpoints_2planes(\n{\ncompute_angular_endpoints_for_quant_levels_lwc(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS,\ndm.maxprec_2planes, low_values1[i], high_values1[i]);\ncompute_angular_endpoints_for_quant_levels_lwc(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,\ndm.maxprec_2planes, low_values2[i], high_values2[i]);\n}\nelse\n{\ncompute_angular_endpoints_for_quant_levels(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS,\ndm.maxprec_2planes, low_values1[i], high_values1[i]);\ncompute_angular_endpoints_for_quant_levels(\nweight_count,\n- dec_weight_quant_uvalue + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,\n+ dec_weight_ideal_value + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET,\ndm.maxprec_2planes, low_values2[i], high_values2[i]);\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix incorrect weight variable name
61,745
07.04.2022 09:01:38
-3,600
458c52086e4b7bb244ea2d0ed6945259835e9ef1
Use returned partition match count from search
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -1158,7 +1158,6 @@ static void construct_block_size_descriptor_3d(\n}\n}\n- // TODO: Probably need to do the 1/2 split for 3d modes too ...\nbsd.block_mode_count_1plane_always = 0; // Skipped for 3D modes\nbsd.block_mode_count_1plane_selected = bm_counts[0];\nbsd.block_mode_count_1plane_2plane_selected = bm_counts[0] + bm_counts[1];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -756,7 +756,6 @@ static float compress_symbolic_block_for_partition_2planes(\nuint8_t *dec_weights_quant_pvalue = tmpbuf.dec_weights_quant_pvalue;\n// For each decimation mode, compute an ideal set of weights with no quantization\n- // TODO: Try to split this list into separate 1 and 2 plane lists?\nfor (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst auto& dm = bsd.get_decimation_mode(i);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -394,8 +394,10 @@ static void count_partition_mismatch_bits(\n* @param partitioning_count The number of packed partitionings.\n* @param mismatch_count Partitioning mismatch counts, in index order.\n* @param[out] partition_ordering Partition index values, in mismatch order.\n+ *\n+ * @return The number of active partitions in this selection.\n*/\n-static void get_partition_ordering_by_mismatch_bits(\n+static unsigned int get_partition_ordering_by_mismatch_bits(\nunsigned int partitioning_count,\nconst unsigned int mismatch_count[BLOCK_MAX_PARTITIONINGS],\nunsigned int partition_ordering[BLOCK_MAX_PARTITIONINGS]\n@@ -408,6 +410,8 @@ static void get_partition_ordering_by_mismatch_bits(\nmscount[mismatch_count[i]]++;\n}\n+ unsigned int active_count = partitioning_count - mscount[255];\n+\n// Create a running sum from the histogram array\n// Cells store previous values only; i.e. 
exclude self after sum\nunsigned int summa = 0;\n@@ -425,6 +429,8 @@ static void get_partition_ordering_by_mismatch_bits(\nunsigned int idx = mscount[mismatch_count[i]]++;\npartition_ordering[idx] = i;\n}\n+\n+ return active_count;\n}\n/**\n@@ -434,8 +440,10 @@ static void get_partition_ordering_by_mismatch_bits(\n* @param blk The image block color data to compress.\n* @param partition_count The desired number of partitions in the block.\n* @param[out] partition_ordering The list of recommended partition indices, in priority order.\n+ *\n+ * @return The number of active partitionings in this selection.\n*/\n-static void compute_kmeans_partition_ordering(\n+static unsigned int compute_kmeans_partition_ordering(\nconst block_size_descriptor& bsd,\nconst image_block& blk,\nunsigned int partition_count,\n@@ -474,7 +482,8 @@ static void compute_kmeans_partition_ordering(\ncount_partition_mismatch_bits(bsd, partition_count, bitmaps, mismatch_counts);\n// Sort the partitions based on the number of mismatched bits\n- get_partition_ordering_by_mismatch_bits(bsd.partitioning_count_selected[partition_count - 1],\n+ return get_partition_ordering_by_mismatch_bits(\n+ bsd.partitioning_count_selected[partition_count - 1],\nmismatch_counts, partition_ordering);\n}\n@@ -509,9 +518,8 @@ void find_best_partition_candidates(\nweight_imprecision_estim = weight_imprecision_estim * weight_imprecision_estim;\nunsigned int partition_sequence[BLOCK_MAX_PARTITIONINGS];\n- compute_kmeans_partition_ordering(bsd, blk, partition_count, partition_sequence);\n- partition_search_limit = astc::min(partition_search_limit,\n- bsd.partitioning_count_selected[partition_count - 1]);\n+ unsigned int sequence_len = compute_kmeans_partition_ordering(bsd, blk, partition_count, partition_sequence);\n+ partition_search_limit = astc::min(partition_search_limit, sequence_len);\nbool uses_alpha = !blk.is_constant_channel(3);\n@@ -531,16 +539,6 @@ void find_best_partition_candidates(\nunsigned int partition = partition_sequence[i];\nconst auto& pi = bsd.get_raw_partition_info(partition_count, partition);\n- // TODO: This escape shouldn't really be needed. We should return\n- // the number of blocks which have usable (!= 255) mismatch count\n- // from compute_kmeans_partition_ordering and use that as the upper\n- // loop limit.\n- unsigned int bk_partition_count = pi.partition_count;\n- if (bk_partition_count < partition_count)\n- {\n- break;\n- }\n-\n// Compute weighting to give to each component in each partition\npartition_metrics pms[BLOCK_MAX_PARTITIONS];\n@@ -634,16 +632,6 @@ void find_best_partition_candidates(\nunsigned int partition = partition_sequence[i];\nconst auto& pi = bsd.get_raw_partition_info(partition_count, partition);\n- // TODO: This escape shouldn't really be needed. We should return\n- // the number of blocks which have usable (!= 255) mismatch count\n- // from compute_kmeans_partition_ordering and use that as the upper\n- // loop limit.\n- unsigned int bk_partition_count = pi.partition_count;\n- if (bk_partition_count < partition_count)\n- {\n- break;\n- }\n-\n// Compute weighting to give to each component in each partition\npartition_metrics pms[BLOCK_MAX_PARTITIONS];\ncompute_avgs_and_dirs_3_comp_rgb(pi, blk, pms);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use returned partition match count from search
61,745
07.04.2022 09:09:56
-3,600
79dd9897e2f1f34a1414f583a23c7956b886abd1
Avoid copying 1 plane low/high results in 2 plane mode
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -1054,7 +1054,6 @@ static inline vfloat4 compute_rgbo_vector(\n}\n/* See header for documentation. */\n-// TODO: Specialize for 1 partition?\nvoid recompute_ideal_colors_1plane(\nconst image_block& blk,\nconst partition_info& pi,\n@@ -1086,7 +1085,6 @@ void recompute_ideal_colors_1plane(\nunsigned int texel_count = pi.partition_texel_count[i];\nconst uint8_t *texel_indexes = pi.texels_of_partition[i];\n- // TODO: Use gathers?\npromise(texel_count > 0);\nfor (unsigned int j = 0; j < texel_count; j++)\n{\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -540,9 +540,6 @@ struct partition_info\n*/\nstruct decimation_info\n{\n- // TODO: These structures are large. Any partitioning opportunities to\n- // improve caching and reduce miss rates?\n-\n/** @brief The total number of texels in the block. */\nuint8_t texel_count;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -573,7 +573,6 @@ void compute_angular_endpoints_2planes(\nfloat (&high_values2)[WEIGHTS_MAX_DECIMATION_MODES][12] = tmpbuf.weight_high_values2;\npromise(bsd.decimation_mode_count_selected > 0);\n- // TODO: Split the list into two parts for this one?\nfor (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst decimation_mode& dm = bsd.decimation_modes[i];\n@@ -610,16 +609,11 @@ void compute_angular_endpoints_2planes(\n}\n}\n- // TODO: Skip start of list!\n- promise(bsd.block_mode_count_1plane_2plane_selected > 0);\n- for (unsigned int i = 0; i < bsd.block_mode_count_1plane_2plane_selected; ++i)\n+ unsigned int start = bsd.block_mode_count_1plane_selected;\n+ unsigned int end = bsd.block_mode_count_1plane_2plane_selected;\n+ for (unsigned int i = start; i < end; i++)\n{\nconst block_mode& bm = bsd.block_modes[i];\n- if (!bm.is_dual_plane)\n- {\n- continue;\n- }\n-\nunsigned int quant_mode = bm.quant_mode;\nunsigned int decim_mode = bm.decimation_mode;\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Avoid copying 1 plane low/high results in 2 plane mode
61,745
07.04.2022 22:42:21
-3,600
d3d7cc1fd8964eec9734451d87358dc892104042
Only compute partition mean for >1 partition
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -35,20 +35,27 @@ void compute_avgs_and_dirs_4_comp(\nint partition_count = pi.partition_count;\npromise(partition_count > 0);\n+ vfloat4 average = blk.data_mean;\n+\nfor (int partition = 0; partition < partition_count; partition++)\n{\nconst uint8_t *texel_indexes = pi.texels_of_partition[partition];\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- vfloat4 base_sum = vfloat4::zero();\n+ // Only compute a partition mean if more than one partition\n+ if (partition_count > 1)\n+ {\n+ average = vfloat4::zero();\nfor (unsigned int i = 0; i < texel_count; i++)\n{\nint iwt = texel_indexes[i];\n- base_sum += blk.texel(iwt);\n+ average += blk.texel(iwt);\n+ }\n+\n+ average = average * (1.0f / static_cast<float>(texel_count));\n}\n- vfloat4 average = base_sum / static_cast<float>(texel_count);\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n@@ -113,6 +120,7 @@ void compute_avgs_and_dirs_3_comp(\npartition_metrics pm[BLOCK_MAX_PARTITIONS]\n) {\nfloat texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3.0f;\n+ vfloat4 average = blk.data_mean.swz<0, 1, 2>();\nconst float* data_vr = blk.data_r;\nconst float* data_vg = blk.data_g;\n@@ -121,6 +129,7 @@ void compute_avgs_and_dirs_3_comp(\nif (omitted_component == 0)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<1, 2, 3>()) / 3.0f;\n+ average = blk.data_mean.swz<1, 2, 3>();\ndata_vr = blk.data_g;\ndata_vg = blk.data_b;\n@@ -129,6 +138,7 @@ void compute_avgs_and_dirs_3_comp(\nelse if (omitted_component == 1)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>()) / 3.0f;\n+ average = blk.data_mean.swz<0, 2, 3>();\ndata_vg = blk.data_b;\ndata_vb = blk.data_a;\n@@ -136,6 +146,7 @@ void compute_avgs_and_dirs_3_comp(\nelse if (omitted_component == 2)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>()) / 3.0f;\n+ average = blk.data_mean.swz<0, 1, 3>();\ndata_vb = blk.data_a;\n}\n@@ -149,14 +160,19 @@ void compute_avgs_and_dirs_3_comp(\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- vfloat4 base_sum = vfloat4::zero();\n+ // Only compute a partition mean if more than one partition\n+ if (partition_count > 1)\n+ {\n+ average = vfloat4::zero();\nfor (unsigned int i = 0; i < texel_count; i++)\n{\nunsigned int iwt = texel_indexes[i];\n- base_sum += vfloat3(data_vr[iwt], data_vg[iwt], data_vb[iwt]);\n+ average += vfloat3(data_vr[iwt], data_vg[iwt], data_vb[iwt]);\n+ }\n+\n+ average = average * (1.0f / static_cast<float>(texel_count));\n}\n- vfloat4 average = base_sum / static_cast<float>(texel_count);\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n@@ -217,20 +233,27 @@ void compute_avgs_and_dirs_3_comp_rgb(\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n+ vfloat4 average = blk.data_mean.swz<0, 1, 2>();\n+\nfor (unsigned int partition = 0; partition < partition_count; partition++)\n{\nconst uint8_t *texel_indexes = pi.texels_of_partition[partition];\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- vfloat4 base_sum = vfloat4::zero();\n+ // Only compute a partition mean if more than one partition\n+ if (partition_count > 1)\n+ {\n+ average = vfloat4::zero();\nfor (unsigned int i = 0; i < texel_count; i++)\n{\nunsigned int iwt = texel_indexes[i];\n- base_sum += blk.texel3(iwt);\n+ 
average += blk.texel3(iwt);\n+ }\n+\n+ average = average * (1.0f / static_cast<float>(texel_count));\n}\n- vfloat4 average = base_sum / static_cast<float>(texel_count);\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n@@ -287,6 +310,7 @@ void compute_avgs_and_dirs_2_comp(\npartition_metrics pm[BLOCK_MAX_PARTITIONS]\n) {\nfloat texel_weight;\n+ vfloat4 average;\nconst float* data_vr = nullptr;\nconst float* data_vg = nullptr;\n@@ -294,6 +318,7 @@ void compute_avgs_and_dirs_2_comp(\nif (component1 == 0 && component2 == 1)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 1>()) / 2.0f;\n+ average = blk.data_mean.swz<0, 1>();\ndata_vr = blk.data_r;\ndata_vg = blk.data_g;\n@@ -301,6 +326,7 @@ void compute_avgs_and_dirs_2_comp(\nelse if (component1 == 0 && component2 == 2)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 2>()) / 2.0f;\n+ average = blk.data_mean.swz<0, 2>();\ndata_vr = blk.data_r;\ndata_vg = blk.data_b;\n@@ -310,6 +336,7 @@ void compute_avgs_and_dirs_2_comp(\nassert(component1 == 1 && component2 == 2);\ntexel_weight = hadd_s(blk.channel_weight.swz<1, 2>()) / 2.0f;\n+ average = blk.data_mean.swz<1, 2>();\ndata_vr = blk.data_g;\ndata_vg = blk.data_b;\n@@ -324,14 +351,19 @@ void compute_avgs_and_dirs_2_comp(\nunsigned int texel_count = pt.partition_texel_count[partition];\npromise(texel_count > 0);\n- vfloat4 base_sum = vfloat4::zero();\n+ // Only compute a partition mean if more than one partition\n+ if (partition_count > 1)\n+ {\n+ average = vfloat4::zero();\nfor (unsigned int i = 0; i < texel_count; i++)\n{\nunsigned int iwt = texel_indexes[i];\n- base_sum += vfloat2(data_vr[iwt], data_vg[iwt]);\n+ average += vfloat2(data_vr[iwt], data_vg[iwt]);\n+ }\n+\n+ average = average * (1.0f / static_cast<float>(texel_count));\n}\n- vfloat4 average = base_sum / static_cast<float>(texel_count);\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -1064,8 +1064,8 @@ void recompute_ideal_colors_1plane(\nvfloat4 rgbs_vectors[BLOCK_MAX_PARTITIONS],\nvfloat4 rgbo_vectors[BLOCK_MAX_PARTITIONS]\n) {\n- int weight_count = di.weight_count;\n- int partition_count = pi.partition_count;\n+ unsigned int weight_count = di.weight_count;\n+ unsigned int partition_count = pi.partition_count;\npromise(weight_count > 0);\npromise(partition_count > 0);\n@@ -1073,24 +1073,29 @@ void recompute_ideal_colors_1plane(\nconst quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\nfloat dec_weight_quant_uvalue[BLOCK_MAX_WEIGHTS];\n- for (int i = 0; i < weight_count; i++)\n+ for (unsigned int i = 0; i < weight_count; i++)\n{\ndec_weight_quant_uvalue[i] = qat.unquantized_value[dec_weights_quant_pvalue[i]] * (1.0f / 64.0f);\n}\n- for (int i = 0; i < partition_count; i++)\n- {\n- vfloat4 rgba_sum(1e-17f);\n+ vfloat4 rgba_sum(blk.data_mean * static_cast<float>(blk.texel_count));\n+ for (unsigned int i = 0; i < partition_count; i++)\n+ {\nunsigned int texel_count = pi.partition_texel_count[i];\nconst uint8_t *texel_indexes = pi.texels_of_partition[i];\n+ // Only compute a partition mean if more than one partition\n+ if (partition_count > 1)\n+ {\n+ rgba_sum = vfloat4(1e-17f);\npromise(texel_count > 0);\nfor (unsigned int j = 0; j < texel_count; j++)\n{\nunsigned int tix = texel_indexes[j];\nrgba_sum += blk.texel(tix);\n}\n+ }\nrgba_sum = rgba_sum * blk.channel_weight;\nvfloat4 rgba_weight_sum = 
max(blk.channel_weight * static_cast<float>(texel_count), 1e-17f);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Only compute partition mean for >1 partition
61,745
08.04.2022 08:40:31
-3,600
2f86d3c70c3dd05de8bba75ef86bd75887b4131d
Filter decimation modes based on actual config usage. Previous releases selected decimation modes that were used, but assumed that both 1 and 2 plane modes were active if feasible to do so. In reality the prerequisite block mode was often not enabled, so we were processing more block modes than needed.
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -802,6 +802,8 @@ static void construct_dt_entry_2d(\nassert(maxprec_1plane >= 0 || maxprec_2planes >= 0);\nbsd.decimation_modes[index].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\nbsd.decimation_modes[index].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\n+ bsd.decimation_modes[index].ref_1_plane = 0;\n+ bsd.decimation_modes[index].ref_2_planes= 0;\n}\n/**\n@@ -947,6 +949,16 @@ static void construct_block_size_descriptor_2d(\n}\nauto& bm = bsd.block_modes[packed_bm_idx];\n+ auto& dm = bsd.decimation_modes[decimation_mode];\n+\n+ if (is_dual_plane)\n+ {\n+ dm.ref_2_planes= 1;\n+ }\n+ else\n+ {\n+ dm.ref_1_plane = 1;\n+ }\nbm.decimation_mode = static_cast<uint8_t>(decimation_mode);\nbm.quant_mode = static_cast<uint8_t>(quant_mode);\n@@ -982,6 +994,8 @@ static void construct_block_size_descriptor_2d(\n{\nbsd.decimation_modes[i].maxprec_1plane = -1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\n+ bsd.decimation_modes[i].ref_1_plane = 0;\n+ bsd.decimation_modes[i].ref_2_planes= 0;\n}\n// Determine the texels to use for kmeans clustering.\n@@ -1066,6 +1080,8 @@ static void construct_block_size_descriptor_3d(\nbsd.decimation_modes[decimation_mode_count].maxprec_1plane = static_cast<int8_t>(maxprec_1plane);\nbsd.decimation_modes[decimation_mode_count].maxprec_2planes = static_cast<int8_t>(maxprec_2planes);\n+ bsd.decimation_modes[decimation_mode_count].ref_1_plane = maxprec_1plane == -1 ? 0 : 1;\n+ bsd.decimation_modes[decimation_mode_count].ref_2_planes= maxprec_2planes == -1 ? 0 : 1;\ndecimation_mode_count++;\n}\n}\n@@ -1076,6 +1092,8 @@ static void construct_block_size_descriptor_3d(\n{\nbsd.decimation_modes[i].maxprec_1plane = -1;\nbsd.decimation_modes[i].maxprec_2planes = -1;\n+ bsd.decimation_modes[i].ref_1_plane = 0;\n+ bsd.decimation_modes[i].ref_2_planes= 0;\n}\nbsd.decimation_mode_count_always = 0; // Skipped for 3D modes\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -410,7 +410,7 @@ static float compress_symbolic_block_for_partition_1plane(\nfor (unsigned int i = 0; i < max_decimation_modes; i++)\n{\nconst auto& dm = bsd.get_decimation_mode(i);\n- if (dm.maxprec_1plane < 0)\n+ if (!dm.ref_1_plane)\n{\ncontinue;\n}\n@@ -759,7 +759,7 @@ static float compress_symbolic_block_for_partition_2planes(\nfor (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst auto& dm = bsd.get_decimation_mode(i);\n- if (dm.maxprec_2planes < 0)\n+ if (!dm.ref_2_planes)\n{\ncontinue;\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -633,6 +633,12 @@ struct decimation_mode\n/** @brief The max weight precision for 2 planes, or -1 if not supported. */\nint8_t maxprec_2planes;\n+\n+ /** @brief Was this actually referenced by an active 1 plane mode? */\n+ uint8_t ref_1_plane;\n+\n+ /** @brief Was this actually referenced by an active 2 plane mode? 
*/\n+ uint8_t ref_2_planes;\n};\n/**\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -516,7 +516,7 @@ void compute_angular_endpoints_1plane(\nfor (unsigned int i = 0; i < max_decimation_modes; i++)\n{\nconst decimation_mode& dm = bsd.decimation_modes[i];\n- if (dm.maxprec_1plane < 0)\n+ if (!dm.ref_1_plane)\n{\ncontinue;\n}\n@@ -576,7 +576,7 @@ void compute_angular_endpoints_2planes(\nfor (unsigned int i = 0; i < bsd.decimation_mode_count_selected; i++)\n{\nconst decimation_mode& dm = bsd.decimation_modes[i];\n- if (dm.maxprec_2planes < 0)\n+ if (!dm.ref_2_planes)\n{\ncontinue;\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Filter decimation modes based on acutal config usage Previous releases selected decimation modes that were used, but assume that both 1 and 2 plane modes were active if feasible to do so. In reality the prerequistite block mode was often not enabled so we were processing more block modes than needed.
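A minimal sketch of the filtering idea behind this commit, using hypothetical `DecimationMode`/`BlockMode` helper types rather than the real astcenc structures: decimation modes are flagged while the enabled block modes are built, and the 1-plane/2-plane searches later skip any mode that no enabled block mode actually references, even if its `maxprec` fields say it would be representable in principle.

```cpp
#include <cstdint>
#include <vector>

struct DecimationMode
{
    int8_t maxprec_1plane { -1 };    // Max weight precision for 1 plane, -1 if unsupported
    int8_t maxprec_2planes { -1 };   // Max weight precision for 2 planes, -1 if unsupported
    uint8_t ref_1_plane { 0 };       // Set only if an *enabled* 1 plane block mode uses this
    uint8_t ref_2_planes { 0 };      // Set only if an *enabled* 2 plane block mode uses this
};

struct BlockMode
{
    unsigned int decimation_mode;
    bool is_dual_plane;
};

// While building the enabled block mode list, record which decimation modes
// are actually reachable from it.
void mark_referenced(std::vector<DecimationMode>& dms, const std::vector<BlockMode>& bms)
{
    for (const BlockMode& bm : bms)
    {
        DecimationMode& dm = dms[bm.decimation_mode];
        if (bm.is_dual_plane)
        {
            dm.ref_2_planes = 1;
        }
        else
        {
            dm.ref_1_plane = 1;
        }
    }
}

// The 1 plane search can then skip modes with no enabled 1 plane user, which
// is a stronger filter than the old "maxprec_1plane < 0" check.
bool skip_for_1plane(const DecimationMode& dm)
{
    return dm.ref_1_plane == 0;
}
```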
61,745
08.04.2022 23:10:14
-3,600
7b1b3bcb4663c0963750fd46adc8e0e0afc30843
Scalarize error after loop in realign_weights_decimated
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -318,19 +318,20 @@ static bool realign_weights_decimated(\nvfloat4 color = color_base + color_offset * plane_weight;\nvfloat4 orig_color = blk.texel(texel);\n- vfloat4 error_weight = blk.channel_weight;\nvfloat4 color_diff = color - orig_color;\nvfloat4 color_up_diff = color_diff + color_offset * plane_up_weight;\nvfloat4 color_down_diff = color_diff + color_offset * plane_down_weight;\n- current_errorv += dot(color_diff * color_diff, error_weight);\n- up_errorv += dot(color_up_diff * color_up_diff, error_weight);\n- down_errorv += dot(color_down_diff * color_down_diff, error_weight);\n+\n+ current_errorv += color_diff * color_diff;\n+ up_errorv += color_up_diff * color_up_diff;\n+ down_errorv += color_down_diff * color_down_diff;\n}\n- float current_error = current_errorv.lane<0>();\n- float up_error = up_errorv.lane<0>();\n- float down_error = down_errorv.lane<0>();\n+ vfloat4 error_weight = blk.channel_weight;\n+ float current_error = hadd_s(current_errorv * error_weight);\n+ float up_error = hadd_s(up_errorv * error_weight);\n+ float down_error = hadd_s(down_errorv * error_weight);\n// Check if the prev or next error is better, and if so use it\nif ((up_error < current_error) && (up_error < down_error))\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Scalarize error after loop in realign_weights_decimated
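A simplified illustration of the scalarization, using a plain 4-float array instead of the real `vfloat4`: the loop accumulates raw per-channel squared differences, and the channel error weights are applied once after the loop rather than via a dot product per candidate per texel.

```cpp
#include <array>
#include <cstddef>

using Vec4 = std::array<float, 4>;

static float weighted_sum(const Vec4& v, const Vec4& w)
{
    return v[0] * w[0] + v[1] * w[1] + v[2] * w[2] + v[3] * w[3];
}

// Accumulate squared per-channel differences inside the loop, then apply the
// per-channel error weights a single time outside it. This trades one dot
// product per texel for one weighted sum per block.
float block_error(const Vec4* decoded, const Vec4* original, size_t count, const Vec4& channel_weight)
{
    Vec4 accum { 0.0f, 0.0f, 0.0f, 0.0f };
    for (size_t i = 0; i < count; i++)
    {
        for (size_t c = 0; c < 4; c++)
        {
            float diff = decoded[i][c] - original[i][c];
            accum[c] += diff * diff;
        }
    }
    return weighted_sum(accum, channel_weight);
}
```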
61,745
09.04.2022 19:22:54
-3,600
b6a3cdec7ad829a09458334b72c8d45a1aa54b2a
Add fast load variant for U8 LDR images
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -842,6 +842,23 @@ static void compress_image(\n// Only the first thread actually runs the initializer\nctx.manage_compress.init(block_count);\n+\n+ // Determine if we can use an optimized load function\n+ bool needs_swz = (swizzle.r != ASTCENC_SWZ_R) || (swizzle.g != ASTCENC_SWZ_G) ||\n+ (swizzle.b != ASTCENC_SWZ_B) || (swizzle.a != ASTCENC_SWZ_A);\n+\n+ bool needs_hdr = (decode_mode == ASTCENC_PRF_HDR) ||\n+ (decode_mode == ASTCENC_PRF_HDR_RGB_LDR_A);\n+\n+ bool use_fast_load = !needs_swz && !needs_hdr &&\n+ block_z == 1 && image.data_type == ASTCENC_TYPE_U8;\n+\n+ auto load_func = fetch_image_block;\n+ if (use_fast_load)\n+ {\n+ load_func = fetch_image_block_fast_ldr;\n+ }\n+\n// All threads run this processing loop until there is no work remaining\nwhile (true)\n{\n@@ -900,7 +917,7 @@ static void compress_image(\n// Fetch the full block for compression\nif (use_full_block)\n{\n- fetch_image_block(decode_mode, image, blk, bsd, x * block_x, y * block_y, z * block_z, swizzle);\n+ load_func(decode_mode, image, blk, bsd, x * block_x, y * block_y, z * block_z, swizzle);\n}\n// Apply alpha scale RDO - substitute constant color block\nelse\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_image.cpp", "new_path": "Source/astcenc_image.cpp", "diff": "@@ -267,6 +267,78 @@ void fetch_image_block(\nblk.grayscale = grayscale;\n}\n+/* See header for documentation. */\n+void fetch_image_block_fast_ldr(\n+ astcenc_profile decode_mode,\n+ const astcenc_image& img,\n+ image_block& blk,\n+ const block_size_descriptor& bsd,\n+ unsigned int xpos,\n+ unsigned int ypos,\n+ unsigned int zpos,\n+ const astcenc_swizzle& swz\n+) {\n+ (void)swz;\n+ (void)decode_mode;\n+\n+ unsigned int xsize = img.dim_x;\n+ unsigned int ysize = img.dim_y;\n+\n+ blk.xpos = xpos;\n+ blk.ypos = ypos;\n+ blk.zpos = zpos;\n+\n+ // True if any non-identity swizzle\n+ int idx = 0;\n+\n+ vfloat4 data_min(1e38f);\n+ vfloat4 data_mean = vfloat4::zero();\n+ vfloat4 data_max(-1e38f);\n+ bool grayscale = true;\n+\n+ const uint8_t* plane = static_cast<const uint8_t*>(img.data[0]);\n+ for (unsigned int y = ypos; y < ypos + bsd.ydim; y++)\n+ {\n+ unsigned int yi = astc::min(y, ysize - 1);\n+\n+ for (unsigned int x = xpos; x < xpos + bsd.xdim; x++)\n+ {\n+ unsigned int xi = astc::min(x, xsize - 1);\n+\n+ vint4 datavi = vint4(plane + (4 * xsize * yi) + (4 * xi));\n+ vfloat4 datav = int_to_float(datavi) * (65535.0f / 255.0f);\n+\n+ // Compute block metadata\n+ data_min = min(data_min, datav);\n+ data_mean += datav;\n+ data_max = max(data_max, datav);\n+\n+ if (grayscale && (datav.lane<0>() != datav.lane<1>() || datav.lane<0>() != datav.lane<2>()))\n+ {\n+ grayscale = false;\n+ }\n+\n+ blk.data_r[idx] = datav.lane<0>();\n+ blk.data_g[idx] = datav.lane<1>();\n+ blk.data_b[idx] = datav.lane<2>();\n+ blk.data_a[idx] = datav.lane<3>();\n+\n+ idx++;\n+ }\n+ }\n+\n+ // Reverse the encoding so we store origin block in the original format\n+ blk.origin_texel = blk.texel(0) / 65535.0f;\n+\n+ // Store block metadata\n+ blk.rgb_lns[0] = 0;\n+ blk.alpha_lns[0] = 0;\n+ blk.data_min = data_min;\n+ blk.data_mean = data_mean / static_cast<float>(bsd.texel_count);\n+ blk.data_max = data_max;\n+ blk.grayscale = grayscale;\n+}\n+\n/* See header for documentation. 
*/\nvoid write_image_block(\nastcenc_image& img,\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1766,7 +1766,7 @@ void compute_averages(\nconst avg_args& ag);\n/**\n- * @brief Fetch a single image block from the input image\n+ * @brief Fetch a single image block from the input image.\n*\n* @param decode_mode The compression color profile.\n* @param img The input image data.\n@@ -1788,7 +1788,32 @@ void fetch_image_block(\nconst astcenc_swizzle& swz);\n/**\n- * @brief Write a single image block from the output image\n+ * @brief Fetch a single image block from the input image.\n+ *\n+ * This specialized variant can be used only if the block is 2D LDR U8 data,\n+ * with no swizzle.\n+ *\n+ * @param decode_mode The compression color profile.\n+ * @param img The input image data.\n+ * @param[out] blk The image block to populate.\n+ * @param bsd The block size information.\n+ * @param xpos The block X coordinate in the input image.\n+ * @param ypos The block Y coordinate in the input image.\n+ * @param zpos The block Z coordinate in the input image.\n+ * @param swz The swizzle to apply on load.\n+ */\n+void fetch_image_block_fast_ldr(\n+ astcenc_profile decode_mode,\n+ const astcenc_image& img,\n+ image_block& blk,\n+ const block_size_descriptor& bsd,\n+ unsigned int xpos,\n+ unsigned int ypos,\n+ unsigned int zpos,\n+ const astcenc_swizzle& swz);\n+\n+/**\n+ * @brief Write a single image block from the output image.\n*\n* @param[out] img The input image data.\n* @param blk The image block to populate.\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add fast load variant for U8 LDR images
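A hedged sketch of the dispatch logic, with stand-in names (`pick_loader`, `load_fast_u8`) that are not part of the astcenc API: the fast path is only selected when the swizzle is the identity, the profile is LDR, the block is 2D, and the input data is U8, otherwise the fully general loader is used.

```cpp
#include <cstdint>

using LoadFunc = void (*)(const uint8_t* src, float* dst, int texel_count);

// Placeholder for the fully general path (swizzles, HDR profiles, U16/F32 data).
void load_generic(const uint8_t* src, float* dst, int texel_count)
{
    for (int i = 0; i < texel_count * 4; i++)
    {
        dst[i] = static_cast<float>(src[i]) * (65535.0f / 255.0f);
    }
}

// Straight-line U8 expansion with no per-channel swizzle or profile handling.
void load_fast_u8(const uint8_t* src, float* dst, int texel_count)
{
    for (int i = 0; i < texel_count * 4; i++)
    {
        dst[i] = static_cast<float>(src[i]) * (65535.0f / 255.0f);
    }
}

struct Swizzle { int r, g, b, a; };  // 0/1/2/3 encodes the identity mapping

LoadFunc pick_loader(const Swizzle& swz, bool hdr_profile, bool block_is_3d, bool data_is_u8)
{
    bool identity_swz = (swz.r == 0) && (swz.g == 1) && (swz.b == 2) && (swz.a == 3);
    bool use_fast = identity_swz && !hdr_profile && !block_is_3d && data_is_u8;
    return use_fast ? load_fast_u8 : load_generic;
}
```

Selecting the loader once per image, rather than branching per block, is what keeps the fast path cheap.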
61,745
19.04.2022 21:16:21
-3,600
4490469c7c145ee962a4a289219987b93947a77a
Enforce min color quant of QUANT_6 earlier. This gives a small improvement in PSNR for some images, as we don't waste an encoding candidate on an illegal quant option.
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -565,7 +565,7 @@ static float compress_symbolic_block_for_partition_1plane(\nrgbo_colors[j],\npartition_format_specifiers[i][j],\nworkscb.color_values[j],\n- (quant_method)color_quant_level[i]);\n+ color_quant_level[i]);\n}\n// If all the color endpoint modes are the same, we get a few more bits to store colors;\n@@ -589,7 +589,7 @@ static float compress_symbolic_block_for_partition_1plane(\nrgbo_colors[j],\npartition_format_specifiers[i][j],\ncolorvals[j],\n- (quant_method)color_quant_level_mod[i]);\n+ color_quant_level_mod[i]);\n}\nif (color_formats_mod[0] == color_formats_mod[1]\n@@ -617,11 +617,6 @@ static float compress_symbolic_block_for_partition_1plane(\nworkscb.block_mode = qw_bm.mode_index;\nworkscb.block_type = SYM_BTYPE_NONCONST;\n- if (workscb.quant_mode < QUANT_6)\n- {\n- workscb.block_type = SYM_BTYPE_ERROR;\n- }\n-\n// Pre-realign test\nif (l == 0)\n{\n@@ -944,7 +939,7 @@ static float compress_symbolic_block_for_partition_2planes(\nrgbs_color, rgbo_color,\npartition_format_specifiers[i][0],\nworkscb.color_values[0],\n- (quant_method)color_quant_level[i]);\n+ color_quant_level[i]);\n// Store header fields\nworkscb.partition_count = 1;\n@@ -955,11 +950,6 @@ static float compress_symbolic_block_for_partition_2planes(\nworkscb.plane2_component = static_cast<int8_t>(plane2_component);\nworkscb.block_type = SYM_BTYPE_NONCONST;\n- if (workscb.quant_mode < 4)\n- {\n- workscb.block_type = SYM_BTYPE_ERROR;\n- }\n-\n// Pre-realign test\nif (l == 0)\n{\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -526,7 +526,7 @@ static void compute_color_error_for_every_integer_count_and_quant_level(\nmode23mult *= 0.0005f; // Empirically determined ....\n// Pick among the available HDR endpoint modes\n- for (int i = 0; i < 8; i++)\n+ for (int i = QUANT_2; i < QUANT_16; i++)\n{\nbest_error[i][3] = ERROR_CALC_DEFAULT;\nbest_error[i][2] = ERROR_CALC_DEFAULT;\n@@ -539,7 +539,7 @@ static void compute_color_error_for_every_integer_count_and_quant_level(\nformat_of_choice[i][0] = FMT_HDR_LUMINANCE_LARGE_RANGE;\n}\n- for (int i = 8; i < 21; i++)\n+ for (int i = QUANT_16; i <= QUANT_256; i++)\n{\n// The base_quant_error should depend on the scale-factor that would be used during\n// actual encode of the color value\n@@ -574,7 +574,7 @@ static void compute_color_error_for_every_integer_count_and_quant_level(\n}\nelse\n{\n- for (int i = 0; i < 4; i++)\n+ for (int i = QUANT_2; i < QUANT_6; i++)\n{\nbest_error[i][3] = ERROR_CALC_DEFAULT;\nbest_error[i][2] = ERROR_CALC_DEFAULT;\n@@ -598,10 +598,10 @@ static void compute_color_error_for_every_integer_count_and_quant_level(\nfloat error_scale_oe_rgb = eci.can_offset_encode ? 
0.25f : 1.0f;\n// Pick among the available LDR endpoint modes\n- for (int i = 4; i < 21; i++)\n+ for (int i = QUANT_6; i <= QUANT_256; i++)\n{\n// Offset encoding not possible at higher quant levels\n- if (i == 19)\n+ if (i >= QUANT_192)\n{\nerror_scale_oe_rgba = 1.0f;\nerror_scale_oe_rgb = 1.0f;\n@@ -697,7 +697,7 @@ static float one_partition_find_best_combination_for_bitcount(\nint& best_format\n) {\nint best_integer_count = 0;\n- float best_integer_count_error = 1e20f;\n+ float best_integer_count_error = ERROR_CALC_DEFAULT;\nfor (int integer_count = 1; integer_count <= 4; integer_count++)\n{\n@@ -705,7 +705,7 @@ static float one_partition_find_best_combination_for_bitcount(\nint quant_level = quant_mode_table[integer_count][bits_available];\n// Don't have enough bits to represent a given endpoint format at all!\n- if (quant_level < 0)\n+ if (quant_level < QUANT_6)\n{\ncontinue;\n}\n@@ -723,7 +723,7 @@ static float one_partition_find_best_combination_for_bitcount(\nbest_quant_level = (quant_method)ql;\nbest_format = FMT_LUMINANCE;\n- if (ql >= 0)\n+ if (ql >= QUANT_6)\n{\nbest_format = best_combined_format[ql][best_integer_count];\n}\n@@ -745,7 +745,7 @@ static void two_partitions_find_best_combination_for_every_quantization_and_inte\nfloat best_combined_error[21][7], // indexed by (quant-level, integer-pair-count-minus-2)\nint best_combined_format[21][7][2]\n) {\n- for (int i = 0; i < 21; i++)\n+ for (int i = QUANT_2; i <= QUANT_256; i++)\n{\nfor (int j = 0; j < 7; j++)\n{\n@@ -753,7 +753,7 @@ static void two_partitions_find_best_combination_for_every_quantization_and_inte\n}\n}\n- for (int quant = 5; quant < 21; quant++)\n+ for (int quant = QUANT_6; quant <= QUANT_256; quant++)\n{\nfor (int i = 0; i < 4; i++) // integer-count for first endpoint-pair\n{\n@@ -800,7 +800,7 @@ static float two_partitions_find_best_combination_for_bitcount(\nint* best_formats\n) {\nint best_integer_count = 0;\n- float best_integer_count_error = 1e20f;\n+ float best_integer_count_error = ERROR_CALC_DEFAULT;\nfor (int integer_count = 2; integer_count <= 8; integer_count++)\n{\n@@ -808,7 +808,7 @@ static float two_partitions_find_best_combination_for_bitcount(\nint quant_level = quant_mode_table[integer_count][bits_available];\n// Don't have enough bits to represent a given endpoint format at all!\n- if (quant_level < 0)\n+ if (quant_level < QUANT_6)\n{\nbreak;\n}\n@@ -827,7 +827,7 @@ static float two_partitions_find_best_combination_for_bitcount(\nbest_quant_level = (quant_method)ql;\nbest_quant_level_mod = (quant_method)ql_mod;\n- if (ql >= 0)\n+ if (ql >= QUANT_6)\n{\nfor (int i = 0; i < 2; i++)\n{\n@@ -859,7 +859,7 @@ static void three_partitions_find_best_combination_for_every_quantization_and_in\nfloat best_combined_error[21][10],\nint best_combined_format[21][10][3]\n) {\n- for (int i = 0; i < 21; i++)\n+ for (int i = QUANT_2; i <= QUANT_256; i++)\n{\nfor (int j = 0; j < 10; j++)\n{\n@@ -867,7 +867,7 @@ static void three_partitions_find_best_combination_for_every_quantization_and_in\n}\n}\n- for (int quant = 5; quant < 21; quant++)\n+ for (int quant = QUANT_6; quant <= QUANT_256; quant++)\n{\nfor (int i = 0; i < 4; i++) // integer-count for first endpoint-pair\n{\n@@ -925,7 +925,7 @@ static float three_partitions_find_best_combination_for_bitcount(\nint* best_formats\n) {\nint best_integer_count = 0;\n- float best_integer_count_error = 1e20f;\n+ float best_integer_count_error = ERROR_CALC_DEFAULT;\nfor (int integer_count = 3; integer_count <= 9; integer_count++)\n{\n@@ -933,7 +933,7 @@ static float 
three_partitions_find_best_combination_for_bitcount(\nint quant_level = quant_mode_table[integer_count][bits_available];\n// Don't have enough bits to represent a given endpoint format at all!\n- if (quant_level < 0)\n+ if (quant_level < QUANT_6)\n{\nbreak;\n}\n@@ -952,7 +952,7 @@ static float three_partitions_find_best_combination_for_bitcount(\nbest_quant_level = (quant_method)ql;\nbest_quant_level_mod = (quant_method)ql_mod;\n- if (ql >= 0)\n+ if (ql >= QUANT_6)\n{\nfor (int i = 0; i < 3; i++)\n{\n@@ -984,7 +984,7 @@ static void four_partitions_find_best_combination_for_every_quantization_and_int\nfloat best_combined_error[21][13],\nint best_combined_format[21][13][4]\n) {\n- for (int i = 0; i < 21; i++)\n+ for (int i = QUANT_2; i <= QUANT_256; i++)\n{\nfor (int j = 0; j < 13; j++)\n{\n@@ -992,7 +992,7 @@ static void four_partitions_find_best_combination_for_every_quantization_and_int\n}\n}\n- for (int quant = 5; quant < 21; quant++)\n+ for (int quant = QUANT_6; quant <= QUANT_256; quant++)\n{\nfor (int i = 0; i < 4; i++) // integer-count for first endpoint-pair\n{\n@@ -1061,7 +1061,7 @@ static float four_partitions_find_best_combination_for_bitcount(\nint* best_formats\n) {\nint best_integer_count = 0;\n- float best_integer_count_error = 1e20f;\n+ float best_integer_count_error = ERROR_CALC_DEFAULT;\nfor (int integer_count = 4; integer_count <= 9; integer_count++)\n{\n@@ -1069,7 +1069,7 @@ static float four_partitions_find_best_combination_for_bitcount(\nint quant_level = quant_mode_table[integer_count][bits_available];\n// Don't have enough bits to represent a given endpoint format at all!\n- if (quant_level < 0)\n+ if (quant_level < QUANT_6)\n{\nbreak;\n}\n@@ -1088,7 +1088,7 @@ static float four_partitions_find_best_combination_for_bitcount(\nbest_quant_level = (quant_method)ql;\nbest_quant_level_mod = (quant_method)ql_mod;\n- if (ql >= 0)\n+ if (ql >= QUANT_6)\n{\nfor (int i = 0; i < 4; i++)\n{\n@@ -1357,9 +1357,13 @@ unsigned int compute_ideal_endpoint_formats(\n}\nblock_mode[i] = best_error_weights[i];\n+\nquant_level[i] = best_quant_levels[best_error_weights[i]];\n- assert(quant_level[i] >= 0 && quant_level[i] < 21);\nquant_level_mod[i] = best_quant_levels_mod[best_error_weights[i]];\n+\n+ assert(quant_level[i] >= QUANT_6 && quant_level[i] <= QUANT_256);\n+ assert(quant_level_mod[i] >= QUANT_6 && quant_level_mod[i] <= QUANT_256);\n+\nfor (int j = 0; j < partition_count; j++)\n{\npartition_format_specifiers[i][j] = best_ep_formats[best_error_weights[i]][j];\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Enforce min color quant of QUANT_6 earlier This gives a small improvement in PSNR to some images as we don't waste an encoding candidate on an illegal quant option.
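A toy example of the early rejection; the `quant_for()` lookup is invented purely so the example runs, while the enum ordering mirrors what the diff implies (QUANT_2 = 0 through QUANT_6 = 4). Rejecting a below-minimum colour quant level during format selection keeps the candidate slot free for a legal encoding, instead of carrying it through and marking the finished block as an error at the end.

```cpp
#include <cstdio>

// Matches the quant_method ordering used in the diff: QUANT_2 = 0 ... QUANT_6 = 4.
enum QuantLevel { QUANT_2 = 0, QUANT_3, QUANT_4, QUANT_5, QUANT_6 };

// Hypothetical stand-in for the real quant_mode_table lookup.
int quant_for(int integer_count, int bits_available)
{
    return bits_available / (3 * integer_count);   // invented numbers, for illustration only
}

int main()
{
    for (int integer_count = 1; integer_count <= 4; integer_count++)
    {
        int quant_level = quant_for(integer_count, 20);

        // Reject sub-QUANT_6 candidates here, rather than producing an error
        // block later in the search.
        if (quant_level < QUANT_6)
        {
            continue;
        }

        std::printf("integer count %d usable at quant level %d\n", integer_count, quant_level);
    }
    return 0;
}
```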
61,745
15.04.2022 15:10:04
-3,600
9403f0ede3dcb437efae92df76c0e8c5b489d14a
Hoist weight undecimation out of loop
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -1065,17 +1065,46 @@ void recompute_ideal_colors_1plane(\nvfloat4 rgbo_vectors[BLOCK_MAX_PARTITIONS]\n) {\nunsigned int weight_count = di.weight_count;\n+ unsigned int total_texel_count = blk.texel_count;\nunsigned int partition_count = pi.partition_count;\npromise(weight_count > 0);\n+ promise(total_texel_count > 0);\npromise(partition_count > 0);\nconst quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\n- float dec_weight_quant_uvalue[BLOCK_MAX_WEIGHTS];\n+ float dec_weight[BLOCK_MAX_WEIGHTS];\nfor (unsigned int i = 0; i < weight_count; i++)\n{\n- dec_weight_quant_uvalue[i] = qat.unquantized_value[dec_weights_quant_pvalue[i]] * (1.0f / 64.0f);\n+ dec_weight[i] = qat.unquantized_value[dec_weights_quant_pvalue[i]] * (1.0f / 64.0f);\n+ }\n+\n+ alignas(ASTCENC_VECALIGN) float undec_weight[BLOCK_MAX_TEXELS];\n+ float* undec_weight_ref;\n+ if (di.max_texel_weight_count == 1)\n+ {\n+ undec_weight_ref = dec_weight;\n+ }\n+ else if (di.max_texel_weight_count <= 2)\n+ {\n+ for (unsigned int i = 0; i < total_texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat weight = bilinear_infill_vla_2(di, dec_weight, i);\n+ storea(weight, undec_weight + i);\n+ }\n+\n+ undec_weight_ref = undec_weight;\n+ }\n+ else\n+ {\n+ for (unsigned int i = 0; i < total_texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat weight = bilinear_infill_vla(di, dec_weight, i);\n+ storea(weight, undec_weight + i);\n+ }\n+\n+ undec_weight_ref = undec_weight;\n}\nvfloat4 rgba_sum(blk.data_mean * static_cast<float>(blk.texel_count));\n@@ -1127,20 +1156,7 @@ void recompute_ideal_colors_1plane(\nvfloat4 rgba = blk.texel(tix);\n- float idx0;\n- if (di.max_texel_weight_count == 1)\n- {\n- assert(tix < BLOCK_MAX_WEIGHTS);\n- idx0 = dec_weight_quant_uvalue[tix];\n- }\n- else if (di.max_texel_weight_count == 2)\n- {\n- idx0 = bilinear_infill_2(di, dec_weight_quant_uvalue, tix);\n- }\n- else\n- {\n- idx0 = bilinear_infill(di, dec_weight_quant_uvalue, tix);\n- }\n+ float idx0 = undec_weight_ref[tix];\nfloat om_idx0 = 1.0f - idx0;\nwmin1 = astc::min(idx0, wmin1);\n@@ -1273,18 +1289,61 @@ void recompute_ideal_colors_2planes(\nint plane2_component\n) {\nunsigned int weight_count = di.weight_count;\n+ unsigned int total_texel_count = blk.texel_count;\n+\n+ promise(total_texel_count > 0);\npromise(weight_count > 0);\nconst quantization_and_transfer_table *qat = &(quant_and_xfer_tables[weight_quant_mode]);\n- float dec_weights_quant_uvalue_plane1[BLOCK_MAX_WEIGHTS_2PLANE];\n- float dec_weights_quant_uvalue_plane2[BLOCK_MAX_WEIGHTS_2PLANE];\n+ float dec_weight_plane1[BLOCK_MAX_WEIGHTS_2PLANE];\n+ float dec_weight_plane2[BLOCK_MAX_WEIGHTS_2PLANE];\nassert(weight_count <= BLOCK_MAX_WEIGHTS_2PLANE);\nfor (unsigned int i = 0; i < weight_count; i++)\n{\n- dec_weights_quant_uvalue_plane1[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane1[i]] * (1.0f / 64.0f);\n- dec_weights_quant_uvalue_plane2[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane2[i]] * (1.0f / 64.0f);\n+ dec_weight_plane1[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane1[i]] * (1.0f / 64.0f);\n+ dec_weight_plane2[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane2[i]] * (1.0f / 64.0f);\n+ }\n+\n+ alignas(ASTCENC_VECALIGN) float undec_weight_plane1[BLOCK_MAX_TEXELS];\n+ alignas(ASTCENC_VECALIGN) float undec_weight_plane2[BLOCK_MAX_TEXELS];\n+\n+ float* 
undec_weight_plane1_ref;\n+ float* undec_weight_plane2_ref;\n+\n+ if (di.max_texel_weight_count == 1)\n+ {\n+ undec_weight_plane1_ref = dec_weight_plane1;\n+ undec_weight_plane2_ref = dec_weight_plane2;\n+ }\n+ else if (di.max_texel_weight_count <= 2)\n+ {\n+ for (unsigned int i = 0; i < total_texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat weight = bilinear_infill_vla_2(di, dec_weight_plane1, i);\n+ storea(weight, undec_weight_plane1 + i);\n+\n+ weight = bilinear_infill_vla_2(di, dec_weight_plane2, i);\n+ storea(weight, undec_weight_plane2 + i);\n+ }\n+\n+ undec_weight_plane1_ref = undec_weight_plane1;\n+ undec_weight_plane2_ref = undec_weight_plane2;\n+ }\n+ else\n+ {\n+ for (unsigned int i = 0; i < total_texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat weight = bilinear_infill_vla(di, dec_weight_plane1, i);\n+ storea(weight, undec_weight_plane1 + i);\n+\n+ weight = bilinear_infill_vla(di, dec_weight_plane2, i);\n+ storea(weight, undec_weight_plane2 + i);\n+ }\n+\n+ undec_weight_plane1_ref = undec_weight_plane1;\n+ undec_weight_plane2_ref = undec_weight_plane2;\n}\nunsigned int texel_count = bsd.texel_count;\n@@ -1323,20 +1382,7 @@ void recompute_ideal_colors_2planes(\n{\nvfloat4 rgba = blk.texel(j);\n- float idx0;\n- if (di.max_texel_weight_count == 1)\n- {\n- assert(j < BLOCK_MAX_WEIGHTS_2PLANE);\n- idx0 = dec_weights_quant_uvalue_plane1[j];\n- }\n- else if (di.max_texel_weight_count == 2)\n- {\n- idx0 = bilinear_infill_2(di, dec_weights_quant_uvalue_plane1, j);\n- }\n- else\n- {\n- idx0 = bilinear_infill(di, dec_weights_quant_uvalue_plane1, j);\n- }\n+ float idx0 = undec_weight_plane1_ref[j];\nfloat om_idx0 = 1.0f - idx0;\nwmin1 = astc::min(idx0, wmin1);\n@@ -1350,20 +1396,7 @@ void recompute_ideal_colors_2planes(\nmiddle1_sum_s += om_idx0 * idx0;\nright1_sum_s += idx0 * idx0;\n- float idx1;\n- if (di.max_texel_weight_count == 1)\n- {\n- assert(j < BLOCK_MAX_WEIGHTS_2PLANE);\n- idx1 = dec_weights_quant_uvalue_plane2[j];\n- }\n- else if (di.max_texel_weight_count == 2)\n- {\n- idx1 = bilinear_infill_2(di, dec_weights_quant_uvalue_plane2, j);\n- }\n- else\n- {\n- idx1 = bilinear_infill(di, dec_weights_quant_uvalue_plane2, j);\n- }\n+ float idx1 = undec_weight_plane2_ref[j];\nfloat om_idx1 = 1.0f - idx1;\nwmin2 = astc::min(idx1, wmin2);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Hoist weight undecimation out of loop
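A simplified model of the hoist, with `infill()` standing in for the real bilinear infill routines: the decimated weights are expanded to one value per texel in a single pass, and the colour-fitting loop then indexes the flat array instead of re-running the infill for every texel it visits.

```cpp
#include <vector>

// Placeholder: the real code blends up to four decimated weights per texel.
float infill(const std::vector<float>& dec_weights, size_t texel)
{
    return dec_weights[texel % dec_weights.size()];
}

void process(const std::vector<float>& dec_weights, size_t texel_count)
{
    // Before: infill() ran inside the colour accumulation loop for every texel.
    // After: expand once up front into a flat per-texel array.
    std::vector<float> undec_weights(texel_count);
    for (size_t i = 0; i < texel_count; i++)
    {
        undec_weights[i] = infill(dec_weights, i);
    }

    for (size_t i = 0; i < texel_count; i++)
    {
        float w = undec_weights[i];
        // ... least-squares endpoint accumulation using w ...
        (void)w;
    }
}
```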
61,745
15.04.2022 14:12:17
-3,600
0c3cddc5cca31784ac91029a076ed7717f9a3ffc
Use struct params for realign_weights
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -65,16 +65,12 @@ static void merge_endpoints(\n* @param bsd The block size information.\n* @param blk The image block color data to compress.\n* @param[out] scb The symbolic compressed block output.\n- * @param[out] dec_weights_quant_pvalue_plane1 The weights for plane 1.\n- * @param[out] dec_weights_quant_pvalue_plane2 The weights for plane 2, or @c nullptr if 1 plane.\n*/\nstatic bool realign_weights_undecimated(\nastcenc_profile decode_mode,\nconst block_size_descriptor& bsd,\nconst image_block& blk,\n- symbolic_compressed_block& scb,\n- uint8_t* dec_weights_quant_pvalue_plane1,\n- uint8_t* dec_weights_quant_pvalue_plane2\n+ symbolic_compressed_block& scb\n) {\n// Get the partition descriptor\nunsigned int partition_count = scb.partition_count;\n@@ -110,7 +106,7 @@ static bool realign_weights_undecimated(\nendpnt1[pa_idx]);\n}\n- uint8_t* dec_weights_quant_pvalue = dec_weights_quant_pvalue_plane1;\n+ uint8_t* dec_weights_quant_pvalue = scb.weights;\nbool adjustments = false;\n// For each plane and partition ...\n@@ -173,7 +169,7 @@ static bool realign_weights_undecimated(\n}\n// Prepare iteration for plane 2\n- dec_weights_quant_pvalue = dec_weights_quant_pvalue_plane2;\n+ dec_weights_quant_pvalue += WEIGHTS_PLANE2_OFFSET;\nplane_mask = ~plane_mask;\n}\n@@ -191,16 +187,12 @@ static bool realign_weights_undecimated(\n* @param bsd The block size information.\n* @param blk The image block color data to compress.\n* @param[out] scb The symbolic compressed block output.\n- * @param[out] dec_weights_quant_pvalue_plane1 The weights for plane 1.\n- * @param[out] dec_weights_quant_pvalue_plane2 The weights for plane 2, or @c nullptr if 1 plane.\n*/\nstatic bool realign_weights_decimated(\nastcenc_profile decode_mode,\nconst block_size_descriptor& bsd,\nconst image_block& blk,\n- symbolic_compressed_block& scb,\n- uint8_t* dec_weights_quant_pvalue_plane1,\n- uint8_t* dec_weights_quant_pvalue_plane2\n+ symbolic_compressed_block& scb\n) {\n// Get the partition descriptor\nunsigned int partition_count = scb.partition_count;\n@@ -244,7 +236,7 @@ static bool realign_weights_decimated(\nuint8_t uq_pl_weights[BLOCK_MAX_WEIGHTS];\nfloat uq_pl_weightsf[BLOCK_MAX_WEIGHTS];\n- uint8_t* dec_weights_quant_pvalue = dec_weights_quant_pvalue_plane1;\n+ uint8_t* dec_weights_quant_pvalue = scb.weights;\nbool adjustments = false;\n// For each plane and partition ...\n@@ -351,7 +343,7 @@ static bool realign_weights_decimated(\n}\n// Prepare iteration for plane 2\n- dec_weights_quant_pvalue = dec_weights_quant_pvalue_plane2;\n+ dec_weights_quant_pvalue += WEIGHTS_PLANE2_OFFSET;\nplane_mask = ~plane_mask;\n}\n@@ -660,14 +652,12 @@ static float compress_symbolic_block_for_partition_1plane(\nif (di.weight_count != bsd.texel_count)\n{\nadjustments = realign_weights_decimated(\n- config.profile, bsd, blk, workscb,\n- workscb.weights, nullptr);\n+ config.profile, bsd, blk, workscb);\n}\nelse\n{\nadjustments = realign_weights_undecimated(\n- config.profile, bsd, blk, workscb,\n- workscb.weights, nullptr);\n+ config.profile, bsd, blk, workscb);\n}\n// Post-realign test\n@@ -994,14 +984,12 @@ static float compress_symbolic_block_for_partition_2planes(\nif (di.weight_count != bsd.texel_count)\n{\nadjustments = realign_weights_decimated(\n- config.profile, bsd, blk, workscb,\n- workscb.weights, workscb.weights + WEIGHTS_PLANE2_OFFSET);\n+ config.profile, bsd, blk, 
workscb);\n}\nelse\n{\nadjustments = realign_weights_undecimated(\n- config.profile, bsd, blk, workscb,\n- workscb.weights, workscb.weights + WEIGHTS_PLANE2_OFFSET);\n+ config.profile, bsd, blk, workscb);\n}\n// Post-realign test\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use struct params for realign_weights
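A toy illustration of the parameter clean-up; the constants and struct layout here are invented stand-ins, not the real symbolic block. Because the weights already live inside the block, the realign routine only needs the block itself and can step to the plane 2 half of the array with a fixed offset instead of taking two extra pointer arguments.

```cpp
#include <cstdint>

constexpr unsigned int WEIGHTS_MAX = 64;            // hypothetical sizes for the sketch
constexpr unsigned int WEIGHTS_PLANE2_OFFSET = 32;

struct SymbolicBlock
{
    uint8_t weights[WEIGHTS_MAX];
};

void realign_weights(SymbolicBlock& scb, bool is_dual_plane)
{
    uint8_t* plane_weights = scb.weights;

    int plane_count = is_dual_plane ? 2 : 1;
    for (int plane = 0; plane < plane_count; plane++)
    {
        // Minimal stand-in for the real nudge-and-test logic.
        for (unsigned int i = 0; i < WEIGHTS_PLANE2_OFFSET; i++)
        {
            if (plane_weights[i] < 64)
            {
                plane_weights[i]++;
            }
        }

        // The second iteration works on the plane 2 half of the same array.
        plane_weights += WEIGHTS_PLANE2_OFFSET;
    }
}
```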
61,745
19.04.2022 22:33:03
-3,600
5b36b530789b2c11e448f4a629fdadb190d4b507
Use selects in compute_angular_endpoints_lwc
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -292,7 +292,7 @@ static void compute_angular_endpoints_for_quant_levels(\n// Check best error against record N\nvfloat4 best_result = best_results[idx_span];\n- vfloat4 new_result = vfloat4(error[i], (float)i, 0.0f, 0.0f);\n+ vfloat4 new_result = vfloat4(error[i], static_cast<float>(i), 0.0f, 0.0f);\nvmask4 mask1(best_result.lane<0>() > error[i]);\nbest_results[idx_span] = select(best_result, new_result, mask1);\n@@ -317,7 +317,7 @@ static void compute_angular_endpoints_for_quant_levels(\nfor (unsigned int i = 0; i <= max_quant_level; i++)\n{\nunsigned int q = quantization_steps_for_level[i];\n- int bsi = (int)best_results[q].lane<1>();\n+ int bsi = static_cast<int>(best_results[q].lane<1>());\n// Did we find anything?\n#if defined(ASTCENC_DIAGNOSTICS)\n@@ -447,15 +447,13 @@ static void compute_angular_endpoints_for_quant_levels_lwc(\n// For each quantization level, find the best error terms. Use packed vectors so data-dependent\n// branches can become selects. This involves some integer to float casts, but the values are\n// small enough so they never round the wrong way.\n- float best_error[ANGULAR_STEPS];\n- int best_index[ANGULAR_STEPS];\n+ vfloat4 best_results[ANGULAR_STEPS];\n// Initialize the array to some safe defaults\npromise(max_quant_steps > 0);\nfor (unsigned int i = 0; i < (max_quant_steps + 4); i++)\n{\n- best_error[i] = ERROR_CALC_DEFAULT;\n- best_index[i] = -1;\n+ best_results[i] = vfloat4(ERROR_CALC_DEFAULT, -1.0f, 0.0f, 0.0f);\n}\npromise(max_angular_steps > 0);\n@@ -464,18 +462,16 @@ static void compute_angular_endpoints_for_quant_levels_lwc(\nint idx_span = weight_span[i];\n// Check best error against record N\n- float current_best = best_error[idx_span];\n- if (error[i] < current_best)\n- {\n- best_error[idx_span] = error[i];\n- best_index[idx_span] = i;\n- }\n+ vfloat4 current_best = best_results[idx_span];\n+ vfloat4 candidate = vfloat4(error[i], static_cast<float>(i), 0.0f, 0.0f);\n+ vmask4 mask(current_best.lane<0>() > error[i]);\n+ best_results[idx_span] = select(current_best, candidate, mask);\n}\nfor (unsigned int i = 0; i <= max_quant_level; i++)\n{\nunsigned int q = quantization_steps_for_level[i];\n- int bsi = best_index[q];\n+ int bsi = static_cast<int>(best_results[q].lane<1>());\n// Did we find anything?\n#if defined(ASTCENC_DIAGNOSTICS)\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use selects in compute_angular_endpoints_lwc
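A scalar analogue of the select-based tracking; the real code packs (error, index) into a SIMD vector and uses a lane blend, but the pattern is the same: replace the data-dependent branch with an unconditional select so the loop stays branch-free and both fields of the record update together.

```cpp
#include <cstddef>

struct Record
{
    float error;
    float index;   // stored as float so the pair can live in adjacent vector lanes
};

// With vectors this is a single blendv; scalar compilers can lower it to
// conditional moves rather than a branch.
static Record select(const Record& a, const Record& b, bool take_b)
{
    return take_b ? b : a;
}

void track_best(const float* error, const int* weight_span, size_t count, Record* best_results)
{
    for (size_t i = 0; i < count; i++)
    {
        Record& slot = best_results[weight_span[i]];
        Record candidate { error[i], static_cast<float>(i) };
        slot = select(slot, candidate, candidate.error < slot.error);
    }
}
```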
61,745
23.04.2022 12:58:59
-3,600
b6d7769e6f5c695c4d08502eea8936628fac451f
Add a masked accumulator
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -500,8 +500,7 @@ void compute_error_squared_rgba(\n+ (ew_b * uncor_dist2 * uncor_dist2)\n+ (ew_a * uncor_dist3 * uncor_dist3);\n- uncor_err = select(vfloat::zero(), uncor_err, mask);\n- haccumulate(uncor_errorsumv, uncor_err);\n+ haccumulate(uncor_errorsumv, uncor_err, mask);\n// Process samechroma data\nvfloat samec_param = (data_r * l_samec_bs0)\n@@ -522,8 +521,7 @@ void compute_error_squared_rgba(\n+ (ew_b * samec_dist2 * samec_dist2)\n+ (ew_a * samec_dist3 * samec_dist3);\n- samec_err = select(vfloat::zero(), samec_err, mask);\n- haccumulate(samec_errorsumv, samec_err);\n+ haccumulate(samec_errorsumv, samec_err, mask);\nlane_ids += vint(ASTCENC_SIMD_WIDTH);\n}\n@@ -639,8 +637,7 @@ void compute_error_squared_rgb(\n+ (ew_g * uncor_dist1 * uncor_dist1)\n+ (ew_b * uncor_dist2 * uncor_dist2);\n- uncor_err = select(vfloat::zero(), uncor_err, mask);\n- haccumulate(uncor_errorsumv, uncor_err);\n+ haccumulate(uncor_errorsumv, uncor_err, mask);\n// Process samechroma data\nvfloat samec_param = (data_r * l_samec_bs0)\n@@ -658,8 +655,7 @@ void compute_error_squared_rgb(\n+ (ew_g * samec_dist1 * samec_dist1)\n+ (ew_b * samec_dist2 * samec_dist2);\n- samec_err = select(vfloat::zero(), samec_err, mask);\n- haccumulate(samec_errorsumv, samec_err);\n+ haccumulate(samec_errorsumv, samec_err, mask);\nlane_ids += vint(ASTCENC_SIMD_WIDTH);\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -617,9 +617,7 @@ float compute_symbolic_block_difference_1plane_1partition(\n// Mask off bad lanes\nvmask mask = lane_id < vint(texel_count);\nlane_id += vint(ASTCENC_SIMD_WIDTH);\n- metric = select(vfloat::zero(), metric, mask);\n-\n- haccumulate(summav, metric);\n+ haccumulate(summav, metric, mask);\n}\nreturn hadd_s(summav);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -132,8 +132,8 @@ static void compute_error_squared_rgb_single_partition(\nvfloat data_a = gatherf(blk.data_a, tix);\nvfloat alpha_diff = data_a - default_a;\nalpha_diff = alpha_diff * alpha_diff;\n- alpha_diff = select(vfloat::zero(), alpha_diff, mask);\n- haccumulate(a_drop_errv, alpha_diff);\n+\n+ haccumulate(a_drop_errv, alpha_diff, mask);\nvfloat data_r = gatherf(blk.data_r, tix);\nvfloat data_g = gatherf(blk.data_g, tix);\n@@ -152,8 +152,7 @@ static void compute_error_squared_rgb_single_partition(\n+ dist1 * dist1 * ews.lane<1>()\n+ dist2 * dist2 * ews.lane<2>();\n- error = select(vfloat::zero(), error, mask);\n- haccumulate(uncor_errv, error);\n+ haccumulate(uncor_errv, error, mask);\n// Compute same chroma error - no \"amod\", its always zero\nparam = data_r * samec_bs0\n@@ -168,8 +167,7 @@ static void compute_error_squared_rgb_single_partition(\n+ dist1 * dist1 * ews.lane<1>()\n+ dist2 * dist2 * ews.lane<2>();\n- error = select(vfloat::zero(), error, mask);\n- haccumulate(samec_errv, error);\n+ haccumulate(samec_errv, error, mask);\n// Compute rgbl error\nparam = data_r * rgbl_bs0\n@@ -184,8 +182,7 @@ static void compute_error_squared_rgb_single_partition(\n+ dist1 * dist1 * ews.lane<1>()\n+ dist2 * dist2 * ews.lane<2>();\n- error = select(vfloat::zero(), error, mask);\n- haccumulate(rgbl_errv, error);\n+ haccumulate(rgbl_errv, error, mask);\n// Compute luma error - no 
\"amod\", its always zero\nparam = data_r * l_bs0\n@@ -200,8 +197,7 @@ static void compute_error_squared_rgb_single_partition(\n+ dist1 * dist1 * ews.lane<1>()\n+ dist2 * dist2 * ews.lane<2>();\n- error = select(vfloat::zero(), error, mask);\n- haccumulate(l_errv, error);\n+ haccumulate(l_errv, error, mask);\n}\na_drop_err = hadd_s(a_drop_errv * ews.lane<3>());\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -598,17 +598,6 @@ ASTCENC_SIMD_INLINE vint8 select(vint8 a, vint8 b, vmask8 cond)\nreturn vint8(_mm256_castps_si256(_mm256_blendv_ps(av, bv, cond.m)));\n}\n-/**\n- * @brief Debug function to print a vector of ints.\n- */\n-ASTCENC_SIMD_INLINE void print(vint8 a)\n-{\n- alignas(ASTCENC_VECALIGN) int v[8];\n- storea(a, v);\n- printf(\"v8_i32:\\n %8d %8d %8d %8d %8d %8d %8d %8d\\n\",\n- v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);\n-}\n-\n// ============================================================================\n// vfloat4 operators and functions\n// ============================================================================\n@@ -885,6 +874,14 @@ ASTCENC_SIMD_INLINE float hadd_s(vfloat8 a)\nreturn hadd_s(lo) + hadd_s(hi);\n}\n+/**\n+ * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ */\n+ASTCENC_SIMD_INLINE vfloat8 select(vfloat8 a, vfloat8 b, vmask8 cond)\n+{\n+ return vfloat8(_mm256_blendv_ps(a.m, b.m, cond.m));\n+}\n+\n/**\n* @brief Accumulate the full horizontal sum of a vector.\n*/\n@@ -916,19 +913,28 @@ ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a)\n}\n/**\n- * @brief Return the sqrt of the lanes in the vector.\n+ * @brief Accumulate masked lane-wise sums for a vector, folded 4-wide.\n*/\n-ASTCENC_SIMD_INLINE vfloat8 sqrt(vfloat8 a)\n+ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a, vmask8 m)\n{\n- return vfloat8(_mm256_sqrt_ps(a.m));\n+ a = select(vfloat8::zero(), a, m);\n+\n+ // Two sequential 4-wide accumulates gives invariance with 4-wide code.\n+ // Note that this approach gives higher error in the sum; adding the two\n+ // smaller numbers together first would be more accurate.\n+ vfloat4 lo(_mm256_extractf128_ps(a.m, 0));\n+ haccumulate(accum, lo);\n+\n+ vfloat4 hi(_mm256_extractf128_ps(a.m, 1));\n+ haccumulate(accum, hi);\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return the sqrt of the lanes in the vector.\n*/\n-ASTCENC_SIMD_INLINE vfloat8 select(vfloat8 a, vfloat8 b, vmask8 cond)\n+ASTCENC_SIMD_INLINE vfloat8 sqrt(vfloat8 a)\n{\n- return vfloat8(_mm256_blendv_ps(a.m, b.m, cond.m));\n+ return vfloat8(_mm256_sqrt_ps(a.m));\n}\n/**\n@@ -995,6 +1001,17 @@ ASTCENC_SIMD_INLINE vfloat8 int_as_float(vint8 a)\nreturn vfloat8(_mm256_castsi256_ps(a.m));\n}\n+/**\n+ * @brief Debug function to print a vector of ints.\n+ */\n+ASTCENC_SIMD_INLINE void print(vint8 a)\n+{\n+ alignas(ASTCENC_VECALIGN) int v[8];\n+ storea(a, v);\n+ printf(\"v8_i32:\\n %8d %8d %8d %8d %8d %8d %8d %8d\\n\",\n+ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);\n+}\n+\n/**\n* @brief Debug function to print a vector of floats.\n*/\n@@ -1007,4 +1024,12 @@ ASTCENC_SIMD_INLINE void print(vfloat8 a)\n(double)v[4], (double)v[5], (double)v[6], (double)v[7]);\n}\n+/**\n+ * @brief Debug function to print a vector of masks.\n+ */\n+ASTCENC_SIMD_INLINE void print(vmask8 a)\n+{\n+ print(select(vint8(0), vint8(1), a));\n+}\n+\n#endif // #ifndef ASTC_VECMATHLIB_AVX2_8_H_INCLUDED\n" }, { "change_type": "MODIFY", 
"old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -129,17 +129,6 @@ ASTCENC_SIMD_INLINE int hadd_rgb_s(vint4 a)\nreturn a.lane<0>() + a.lane<1>() + a.lane<2>();\n}\n-/**\n- * @brief Debug function to print a vector of ints.\n- */\n-ASTCENC_SIMD_INLINE void print(vint4 a)\n-{\n- alignas(16) int v[4];\n- storea(a, v);\n- printf(\"v4_i32:\\n %8d %8d %8d %8d\\n\",\n- v[0], v[1], v[2], v[3]);\n-}\n-\n// ============================================================================\n// vfloat4 operators and functions\n// ============================================================================\n@@ -297,6 +286,15 @@ ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat4 a)\naccum = accum + a;\n}\n+/**\n+ * @brief Accumulate lane-wise sums for a masked vector.\n+ */\n+ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat4 a, vmask4 m)\n+{\n+ a = select(vfloat4::zero(), a, m);\n+ accum = accum + a;\n+}\n+\n/**\n* @brief Return the horizontal sum of RGB vector lanes as a scalar.\n*/\n@@ -350,6 +348,17 @@ ASTCENC_SIMD_INLINE vfloat4 recip(vfloat4 b)\nreturn 1.0f / b;\n}\n+/**\n+ * @brief Debug function to print a vector of ints.\n+ */\n+ASTCENC_SIMD_INLINE void print(vint4 a)\n+{\n+ alignas(16) int v[4];\n+ storea(a, v);\n+ printf(\"v4_i32:\\n %8d %8d %8d %8d\\n\",\n+ v[0], v[1], v[2], v[3]);\n+}\n+\n/**\n* @brief Debug function to print a vector of floats.\n*/\n@@ -361,4 +370,12 @@ ASTCENC_SIMD_INLINE void print(vfloat4 a)\n(double)v[0], (double)v[1], (double)v[2], (double)v[3]);\n}\n+/**\n+ * @brief Debug function to print a vector of masks.\n+ */\n+ASTCENC_SIMD_INLINE void print(vmask4 a)\n+{\n+ print(select(vint4(0), vint4(1), a));\n+}\n+\n#endif // #ifndef ASTC_VECMATHLIB_COMMON_4_H_INCLUDED\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add a masked accumulator
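A scalar model of the masked accumulator, with invented types; in astcenc the arguments are 4- or 8-wide vectors and a lane mask. Folding the select-against-zero into the helper keeps every call site to a single line and gives the AVX2 build one place to specialize the 8-to-4 fold.

```cpp
struct Lane
{
    float value;
    bool active;
};

// Equivalent to: accum += select(0.0f, value, mask);
inline void haccumulate(float& accum, float value, bool mask)
{
    accum += mask ? value : 0.0f;
}

float sum_active(const Lane* lanes, int count)
{
    float accum = 0.0f;
    for (int i = 0; i < count; i++)
    {
        haccumulate(accum, lanes[i].value, lanes[i].active);
    }
    return accum;
}
```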
61,745
23.04.2022 21:02:03
-3,600
502d8aa4b1f5c52fc38ddac2db581f6e88bad30e
Add sequential select-based partition mean computation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "#include <cassert>\n+/* See header for documentation. */\n+void compute_partition_averages_rgb(\n+ const partition_info& pi,\n+ const image_block& blk,\n+ vfloat4 averages[BLOCK_MAX_PARTITIONS]\n+) {\n+ unsigned int partition_count = pi.partition_count;\n+ unsigned int texel_count = blk.texel_count;\n+ promise(texel_count > 0);\n+\n+ // For 1 partition just use the precomputed mean\n+ if (partition_count == 1)\n+ {\n+ averages[0] = blk.data_mean.swz<0, 1, 2>();\n+ }\n+ // For 2 partitions scan results for partition 0, compute partition 1\n+ else if (partition_count == 2)\n+ {\n+ vfloat4 pp_avg_rgb[3] {};\n+\n+ vint lane_id = vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgb[0], data_r, p0_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgb[1], data_g, p0_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgb[2], data_b, p0_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean.swz<0, 1, 2>() * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat3(hadd_s(pp_avg_rgb[0]),\n+ hadd_s(pp_avg_rgb[1]),\n+ hadd_s(pp_avg_rgb[2]));\n+\n+ vfloat4 p1_total = block_total - p0_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ }\n+ // For 3 partitions scan results for partition 0/1, compute partition 2\n+ else if (partition_count == 3)\n+ {\n+ vfloat4 pp_avg_rgb[2][3] {};\n+\n+ vint lane_id = vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+ vmask p1_mask = lane_mask & (texel_partition == vint(1));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgb[0][0], data_r, p0_mask);\n+ haccumulate(pp_avg_rgb[1][0], data_r, p1_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgb[0][1], data_g, p0_mask);\n+ haccumulate(pp_avg_rgb[1][1], data_g, p1_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgb[0][2], data_b, p0_mask);\n+ haccumulate(pp_avg_rgb[1][2], data_b, p1_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean.swz<0, 1, 2>() * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat3(hadd_s(pp_avg_rgb[0][0]),\n+ hadd_s(pp_avg_rgb[0][1]),\n+ hadd_s(pp_avg_rgb[0][2]));\n+\n+ vfloat4 p1_total = vfloat3(hadd_s(pp_avg_rgb[1][0]),\n+ hadd_s(pp_avg_rgb[1][1]),\n+ hadd_s(pp_avg_rgb[1][2]));\n+\n+ vfloat4 p2_total = block_total - p0_total - p1_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ averages[2] = p2_total / static_cast<float>(pi.partition_texel_count[2]);\n+ }\n+ else\n+ {\n+ // For 4 partitions scan results for partition 0/1/2, compute partition 3\n+ vfloat4 pp_avg_rgb[3][3] {};\n+\n+ vint lane_id = 
vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+ vmask p1_mask = lane_mask & (texel_partition == vint(1));\n+ vmask p2_mask = lane_mask & (texel_partition == vint(2));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgb[0][0], data_r, p0_mask);\n+ haccumulate(pp_avg_rgb[1][0], data_r, p1_mask);\n+ haccumulate(pp_avg_rgb[2][0], data_r, p2_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgb[0][1], data_g, p0_mask);\n+ haccumulate(pp_avg_rgb[1][1], data_g, p1_mask);\n+ haccumulate(pp_avg_rgb[2][1], data_g, p2_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgb[0][2], data_b, p0_mask);\n+ haccumulate(pp_avg_rgb[1][2], data_b, p1_mask);\n+ haccumulate(pp_avg_rgb[2][2], data_b, p2_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean.swz<0, 1, 2>() * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat3(hadd_s(pp_avg_rgb[0][0]),\n+ hadd_s(pp_avg_rgb[0][1]),\n+ hadd_s(pp_avg_rgb[0][2]));\n+\n+ vfloat4 p1_total = vfloat3(hadd_s(pp_avg_rgb[1][0]),\n+ hadd_s(pp_avg_rgb[1][1]),\n+ hadd_s(pp_avg_rgb[1][2]));\n+\n+ vfloat4 p2_total = vfloat3(hadd_s(pp_avg_rgb[2][0]),\n+ hadd_s(pp_avg_rgb[2][1]),\n+ hadd_s(pp_avg_rgb[2][2]));\n+\n+ vfloat4 p3_total = block_total - p0_total - p1_total- p2_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ averages[2] = p2_total / static_cast<float>(pi.partition_texel_count[2]);\n+ averages[3] = p3_total / static_cast<float>(pi.partition_texel_count[3]);\n+ }\n+}\n+\n+/* See header for documentation. 
*/\n+void compute_partition_averages_rgba(\n+ const partition_info& pi,\n+ const image_block& blk,\n+ vfloat4 averages[BLOCK_MAX_PARTITIONS]\n+) {\n+ unsigned int partition_count = pi.partition_count;\n+ unsigned int texel_count = blk.texel_count;\n+ promise(texel_count > 0);\n+\n+ // For 1 partition just use the precomputed mean\n+ if (partition_count == 1)\n+ {\n+ averages[0] = blk.data_mean;\n+ }\n+ // For 2 partitions scan results for partition 0, compute partition 1\n+ else if (partition_count == 2)\n+ {\n+ vfloat4 pp_avg_rgba[4] {};\n+\n+ vint lane_id = vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgba[0], data_r, p0_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgba[1], data_g, p0_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgba[2], data_b, p0_mask);\n+\n+ vfloat data_a = loada(blk.data_a + i);\n+ haccumulate(pp_avg_rgba[3], data_a, p0_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat4(hadd_s(pp_avg_rgba[0]),\n+ hadd_s(pp_avg_rgba[1]),\n+ hadd_s(pp_avg_rgba[2]),\n+ hadd_s(pp_avg_rgba[3]));\n+\n+ vfloat4 p1_total = block_total - p0_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ }\n+ // For 3 partitions scan results for partition 0/1, compute partition 2\n+ else if (partition_count == 3)\n+ {\n+ vfloat4 pp_avg_rgba[2][4] {};\n+\n+ vint lane_id = vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+ vmask p1_mask = lane_mask & (texel_partition == vint(1));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgba[0][0], data_r, p0_mask);\n+ haccumulate(pp_avg_rgba[1][0], data_r, p1_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgba[0][1], data_g, p0_mask);\n+ haccumulate(pp_avg_rgba[1][1], data_g, p1_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgba[0][2], data_b, p0_mask);\n+ haccumulate(pp_avg_rgba[1][2], data_b, p1_mask);\n+\n+ vfloat data_a = loada(blk.data_a + i);\n+ haccumulate(pp_avg_rgba[0][3], data_a, p0_mask);\n+ haccumulate(pp_avg_rgba[1][3], data_a, p1_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat4(hadd_s(pp_avg_rgba[0][0]),\n+ hadd_s(pp_avg_rgba[0][1]),\n+ hadd_s(pp_avg_rgba[0][2]),\n+ hadd_s(pp_avg_rgba[0][3]));\n+\n+ vfloat4 p1_total = vfloat4(hadd_s(pp_avg_rgba[1][0]),\n+ hadd_s(pp_avg_rgba[1][1]),\n+ hadd_s(pp_avg_rgba[1][2]),\n+ hadd_s(pp_avg_rgba[1][3]));\n+\n+ vfloat4 p2_total = block_total - p0_total - p1_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ averages[2] = p2_total / static_cast<float>(pi.partition_texel_count[2]);\n+ }\n+ else\n+ {\n+ // For 4 partitions scan results for partition 
0/1/2, compute partition 3\n+ vfloat4 pp_avg_rgba[3][4] {};\n+\n+ vint lane_id = vint::lane_id();\n+ for (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vint texel_partition(pi.partition_of_texel + i);\n+\n+ vmask lane_mask = lane_id < vint(texel_count);\n+ lane_id += vint(ASTCENC_SIMD_WIDTH);\n+\n+ vmask p0_mask = lane_mask & (texel_partition == vint(0));\n+ vmask p1_mask = lane_mask & (texel_partition == vint(1));\n+ vmask p2_mask = lane_mask & (texel_partition == vint(2));\n+\n+ vfloat data_r = loada(blk.data_r + i);\n+ haccumulate(pp_avg_rgba[0][0], data_r, p0_mask);\n+ haccumulate(pp_avg_rgba[1][0], data_r, p1_mask);\n+ haccumulate(pp_avg_rgba[2][0], data_r, p2_mask);\n+\n+ vfloat data_g = loada(blk.data_g + i);\n+ haccumulate(pp_avg_rgba[0][1], data_g, p0_mask);\n+ haccumulate(pp_avg_rgba[1][1], data_g, p1_mask);\n+ haccumulate(pp_avg_rgba[2][1], data_g, p2_mask);\n+\n+ vfloat data_b = loada(blk.data_b + i);\n+ haccumulate(pp_avg_rgba[0][2], data_b, p0_mask);\n+ haccumulate(pp_avg_rgba[1][2], data_b, p1_mask);\n+ haccumulate(pp_avg_rgba[2][2], data_b, p2_mask);\n+\n+ vfloat data_a = loada(blk.data_a + i);\n+ haccumulate(pp_avg_rgba[0][3], data_a, p0_mask);\n+ haccumulate(pp_avg_rgba[1][3], data_a, p1_mask);\n+ haccumulate(pp_avg_rgba[2][3], data_a, p2_mask);\n+ }\n+\n+ vfloat4 block_total = blk.data_mean * static_cast<float>(blk.texel_count);\n+\n+ vfloat4 p0_total = vfloat4(hadd_s(pp_avg_rgba[0][0]),\n+ hadd_s(pp_avg_rgba[0][1]),\n+ hadd_s(pp_avg_rgba[0][2]),\n+ hadd_s(pp_avg_rgba[0][3]));\n+\n+ vfloat4 p1_total = vfloat4(hadd_s(pp_avg_rgba[1][0]),\n+ hadd_s(pp_avg_rgba[1][1]),\n+ hadd_s(pp_avg_rgba[1][2]),\n+ hadd_s(pp_avg_rgba[1][3]));\n+\n+ vfloat4 p2_total = vfloat4(hadd_s(pp_avg_rgba[2][0]),\n+ hadd_s(pp_avg_rgba[2][1]),\n+ hadd_s(pp_avg_rgba[2][2]),\n+ hadd_s(pp_avg_rgba[2][3]));\n+\n+ vfloat4 p3_total = block_total - p0_total - p1_total- p2_total;\n+\n+ averages[0] = p0_total / static_cast<float>(pi.partition_texel_count[0]);\n+ averages[1] = p1_total / static_cast<float>(pi.partition_texel_count[1]);\n+ averages[2] = p2_total / static_cast<float>(pi.partition_texel_count[2]);\n+ averages[3] = p3_total / static_cast<float>(pi.partition_texel_count[3]);\n+ }\n+}\n+\n/* See header for documentation. 
*/\nvoid compute_avgs_and_dirs_4_comp(\nconst partition_info& pi,\n@@ -35,7 +357,9 @@ void compute_avgs_and_dirs_4_comp(\nint partition_count = pi.partition_count;\npromise(partition_count > 0);\n- vfloat4 average = blk.data_mean;\n+ // Pre-compute partition_averages\n+ vfloat4 partition_averages[BLOCK_MAX_PARTITIONS];\n+ compute_partition_averages_rgba(pi, blk, partition_averages);\nfor (int partition = 0; partition < partition_count; partition++)\n{\n@@ -43,19 +367,7 @@ void compute_avgs_and_dirs_4_comp(\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- // Only compute a partition mean if more than one partition\n- if (partition_count > 1)\n- {\n- average = vfloat4::zero();\n- for (unsigned int i = 0; i < texel_count; i++)\n- {\n- int iwt = texel_indexes[i];\n- average += blk.texel(iwt);\n- }\n-\n- average = average * (1.0f / static_cast<float>(texel_count));\n- }\n-\n+ vfloat4 average = partition_averages[partition];\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n@@ -119,17 +431,25 @@ void compute_avgs_and_dirs_3_comp(\nunsigned int omitted_component,\npartition_metrics pm[BLOCK_MAX_PARTITIONS]\n) {\n+ // Pre-compute partition_averages\n+ vfloat4 partition_averages[BLOCK_MAX_PARTITIONS];\n+ compute_partition_averages_rgba(pi, blk, partition_averages);\n+\nfloat texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3.0f;\n- vfloat4 average = blk.data_mean.swz<0, 1, 2>();\nconst float* data_vr = blk.data_r;\nconst float* data_vg = blk.data_g;\nconst float* data_vb = blk.data_b;\n+ // TODO: Data-driven permute would be useful to avoid this ...\nif (omitted_component == 0)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<1, 2, 3>()) / 3.0f;\n- average = blk.data_mean.swz<1, 2, 3>();\n+\n+ partition_averages[0] = partition_averages[0].swz<1, 2, 3>();\n+ partition_averages[1] = partition_averages[1].swz<1, 2, 3>();\n+ partition_averages[2] = partition_averages[2].swz<1, 2, 3>();\n+ partition_averages[3] = partition_averages[3].swz<1, 2, 3>();\ndata_vr = blk.data_g;\ndata_vg = blk.data_b;\n@@ -138,7 +458,11 @@ void compute_avgs_and_dirs_3_comp(\nelse if (omitted_component == 1)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>()) / 3.0f;\n- average = blk.data_mean.swz<0, 2, 3>();\n+\n+ partition_averages[0] = partition_averages[0].swz<0, 2, 3>();\n+ partition_averages[1] = partition_averages[1].swz<0, 2, 3>();\n+ partition_averages[2] = partition_averages[2].swz<0, 2, 3>();\n+ partition_averages[3] = partition_averages[3].swz<0, 2, 3>();\ndata_vg = blk.data_b;\ndata_vb = blk.data_a;\n@@ -146,10 +470,21 @@ void compute_avgs_and_dirs_3_comp(\nelse if (omitted_component == 2)\n{\ntexel_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>()) / 3.0f;\n- average = blk.data_mean.swz<0, 1, 3>();\n+\n+ partition_averages[0] = partition_averages[0].swz<0, 1, 3>();\n+ partition_averages[1] = partition_averages[1].swz<0, 1, 3>();\n+ partition_averages[2] = partition_averages[2].swz<0, 1, 3>();\n+ partition_averages[3] = partition_averages[3].swz<0, 1, 3>();\ndata_vb = blk.data_a;\n}\n+ else\n+ {\n+ partition_averages[0] = partition_averages[0].swz<0, 1, 2>();\n+ partition_averages[1] = partition_averages[1].swz<0, 1, 2>();\n+ partition_averages[2] = partition_averages[2].swz<0, 1, 2>();\n+ partition_averages[3] = partition_averages[3].swz<0, 1, 2>();\n+ }\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n@@ -160,19 +495,7 @@ void compute_avgs_and_dirs_3_comp(\nunsigned int texel_count = 
pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- // Only compute a partition mean if more than one partition\n- if (partition_count > 1)\n- {\n- average = vfloat4::zero();\n- for (unsigned int i = 0; i < texel_count; i++)\n- {\n- unsigned int iwt = texel_indexes[i];\n- average += vfloat3(data_vr[iwt], data_vg[iwt], data_vb[iwt]);\n- }\n-\n- average = average * (1.0f / static_cast<float>(texel_count));\n- }\n-\n+ vfloat4 average = partition_averages[partition];\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n@@ -233,7 +556,9 @@ void compute_avgs_and_dirs_3_comp_rgb(\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n- vfloat4 average = blk.data_mean.swz<0, 1, 2>();\n+ // Pre-compute partition_averages\n+ vfloat4 partition_averages[BLOCK_MAX_PARTITIONS];\n+ compute_partition_averages_rgb(pi, blk, partition_averages);\nfor (unsigned int partition = 0; partition < partition_count; partition++)\n{\n@@ -241,19 +566,7 @@ void compute_avgs_and_dirs_3_comp_rgb(\nunsigned int texel_count = pi.partition_texel_count[partition];\npromise(texel_count > 0);\n- // Only compute a partition mean if more than one partition\n- if (partition_count > 1)\n- {\n- average = vfloat4::zero();\n- for (unsigned int i = 0; i < texel_count; i++)\n- {\n- unsigned int iwt = texel_indexes[i];\n- average += blk.texel3(iwt);\n- }\n-\n- average = average * (1.0f / static_cast<float>(texel_count));\n- }\n-\n+ vfloat4 average = partition_averages[partition];\npm[partition].avg = average;\nvfloat4 sum_xp = vfloat4::zero();\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -331,7 +331,14 @@ static void compute_ideal_colors_and_weights_3_comp(\ndata_vb = blk.data_b;\n}\n+ if (omitted_component == 3)\n+ {\n+ compute_avgs_and_dirs_3_comp_rgb(pi, blk, pms);\n+ }\n+ else\n+ {\ncompute_avgs_and_dirs_3_comp(pi, blk, omitted_component, pms);\n+ }\nbool is_constant_wes { true };\nfloat partition0_len_sq { 0.0f };\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add sequential select-based partition mean computation
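A scalar sketch of the partition-average trick, using plain vectors instead of SIMD lanes: only the first N-1 partitions are summed during the scan, and the last partition's sum is recovered from the already-known block mean, so a 2-partition block needs just one set of accumulators.

```cpp
#include <vector>

void partition_means(const std::vector<float>& data,
                     const std::vector<int>& partition_of_texel,
                     const std::vector<int>& partition_texel_count,
                     float block_mean,
                     std::vector<float>& means)
{
    int partition_count = static_cast<int>(partition_texel_count.size());
    std::vector<float> sums(partition_count, 0.0f);

    // In the vectorized version this loop runs SIMD-wide with one masked
    // accumulate per tracked partition; the branch here plays the mask role.
    for (size_t i = 0; i < data.size(); i++)
    {
        int p = partition_of_texel[i];
        if (p < partition_count - 1)
        {
            sums[p] += data[i];
        }
    }

    // The last partition falls out of the block total minus the others.
    float remainder = block_mean * static_cast<float>(data.size());
    means.resize(partition_count);
    for (int p = 0; p < partition_count - 1; p++)
    {
        remainder -= sums[p];
        means[p] = sums[p] / static_cast<float>(partition_texel_count[p]);
    }
    means[partition_count - 1] = remainder / static_cast<float>(partition_texel_count[partition_count - 1]);
}
```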
61,745
23.04.2022 21:15:37
-3,600
54451486ce3ca394ce63c69506a7a61e428c068c
Add Doxygen to new functions
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "#include <cassert>\n-/* See header for documentation. */\n-void compute_partition_averages_rgb(\n+/**\n+ * @brief Compute the average RGB color of each partition.\n+ *\n+ * The algorithm here uses a vectorized sequential scan and per-partition\n+ * color accumulators, using select() to mask texel lanes in other partitions.\n+ *\n+ * We only accumulate sums for N-1 partitions during the scan; the value for\n+ * the last partition can be computed given that we know the block-wide average\n+ * already.\n+ *\n+ * Because of this we could reduce the loop iteration count so it \"just\" spans\n+ * the max texel index needed for the N-1 partitions, which could need fewer\n+ * iterations than the full block texel count. However, this makes the loop\n+ * count erratic and causes more branch mispredictions so is a net loss.\n+ *\n+ * @param pi The partitioning to use.\n+ * @param blk The block data to process.\n+ * @param[out] averages The output averages. Unused partition indices will\n+ * not be initialized, and lane<3> will be zero.\n+ */\n+static void compute_partition_averages_rgb(\nconst partition_info& pi,\nconst image_block& blk,\nvfloat4 averages[BLOCK_MAX_PARTITIONS]\n@@ -176,8 +195,27 @@ void compute_partition_averages_rgb(\n}\n}\n-/* See header for documentation. */\n-void compute_partition_averages_rgba(\n+/**\n+ * @brief Compute the average RGBA color of each partition.\n+ *\n+ * The algorithm here uses a vectorized sequential scan and per-partition\n+ * color accumulators, using select() to mask texel lanes in other partitions.\n+ *\n+ * We only accumulate sums for N-1 partitions during the scan; the value for\n+ * the last partition can be computed given that we know the block-wide average\n+ * already.\n+ *\n+ * Because of this we could reduce the loop iteration count so it \"just\" spans\n+ * the max texel index needed for the N-1 partitions, which could need fewer\n+ * iterations than the full block texel count. However, this makes the loop\n+ * count erratic and causes more branch mispredictions so is a net loss.\n+ *\n+ * @param pi The partitioning to use.\n+ * @param blk The block data to process.\n+ * @param[out] averages The output averages. Unused partition indices will\n+ * not be initialized.\n+ */\n+static void compute_partition_averages_rgba(\nconst partition_info& pi,\nconst image_block& blk,\nvfloat4 averages[BLOCK_MAX_PARTITIONS]\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add Doxygen to new functions
61,745
23.04.2022 22:03:51
-3,600
684c216784246474df238c93c58088742650040c
Add placeholder changelog
[ { "change_type": "MODIFY", "old_path": "Docs/ChangeLog-3x.md", "new_path": "Docs/ChangeLog-3x.md", "diff": "@@ -6,6 +6,29 @@ release of the 3.x series.\nAll performance data on this page is measured on an Intel Core i5-9600K\nclocked at 4.2 GHz, running `astcenc` using AVX2 and 6 threads.\n+<!-- ---------------------------------------------------------------------- -->\n+## 3.7\n+\n+**Status:** In development\n+\n+The 3.7 release is in development ...\n+\n+* **General:**\n+ * **Feature:** The command line tool PNG loader has been switched to use\n+ the Wuffs library, which is robust and significantly faster than the\n+ current stb_image implementation.\n+\n+### Performance:\n+\n+Key for charts:\n+\n+* Color = block size (see legend).\n+* Letter = image format (N = normal map, G = grayscale, L = LDR, H = HDR).\n+\n+**Relative performance vs 3.5 release:**\n+\n+![Relative scores 3.6 vs 3.5](./ChangeLogImg/relative-3.5-to-3.6.png)\n+\n<!-- ---------------------------------------------------------------------- -->\n## 3.6\n@@ -22,14 +45,15 @@ rebuilding your client-side code using the updated `astcenc.h` header.\n`SELF_DECOMPRESS_ONLY` flag set. The flag therefore no longer improves\ncompression performance, but still reduces context creation time and\ncontext data table memory footprint.\n- * **Feature:** Image quality for 4x4 `-fastest` configuration has been improved.\n- * **Optimization:** Decimation modes are reliably excluded from processing when\n- they are only partially selected in the compressor configuration (e.g. if\n- used for single plane, but not dual plane modes). This is a significant\n+ * **Feature:** Image quality for 4x4 `-fastest` configuration has been\n+ improved.\n+ * **Optimization:** Decimation modes are reliably excluded from processing\n+ when they are only partially selected in the compressor configuration (e.g.\n+ if used for single plane, but not dual plane modes). This is a significant\nperformance optimization for all quality levels.\n- * **Optimization:** Fast-path block load function variant added for 2D LDR images\n- with no swizzle. This is a moderate performance optimization for the fast\n- and fastest quality levels.\n+ * **Optimization:** Fast-path block load function variant added for 2D LDR\n+ images with no swizzle. This is a moderate performance optimization for the\n+ fast and fastest quality levels.\n### Performance:\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add placeholder changelog
61,745
24.04.2022 19:16:08
-3,600
8afe69603a4026cbbd58f0576cd04212ddbcc9f3
Fix CLI log message
[ { "change_type": "MODIFY", "old_path": "Source/astcenccli_toplevel.cpp", "new_path": "Source/astcenccli_toplevel.cpp", "diff": "@@ -1096,8 +1096,8 @@ static void print_astcenc_config(\nprintf(\" Partition cutoff: %u partitions\\n\", config.tune_partition_count_limit);\nprintf(\" Partition index cutoff: %u partition ids\\n\", config.tune_partition_index_limit);\nprintf(\" PSNR cutoff: %g dB\\n\", (double)config.tune_db_limit);\n- printf(\" 2.2+ partition cutoff: %g\\n\", (double)config.tune_2_partition_early_out_limit_factor);\n- printf(\" 3.2+ partition cutoff: %g\\n\", (double)config.tune_3_partition_early_out_limit_factor);\n+ printf(\" 3 partition cutoff: %g\\n\", (double)config.tune_2_partition_early_out_limit_factor);\n+ printf(\" 4 partition cutoff: %g\\n\", (double)config.tune_3_partition_early_out_limit_factor);\nprintf(\" 2 plane correlation cutoff: %g\\n\", (double)config.tune_2_plane_early_out_limit_correlation);\nprintf(\" Block mode centile cutoff: %g%%\\n\", (double)(config.tune_block_mode_limit));\nprintf(\" Candidate cutoff: %u candidates\\n\", config.tune_candidate_limit);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix CLI log message
61,745
24.04.2022 21:22:25
-3,600
1af6398950519efe0664a3af22a0751bc5b73674
Add explicit VLA vfloatacc type for accumulators By default this is a vfloat4, as this allows invariant output across 4- and 8-wide implementations. However you can now just change it in the source to a vfloat8 for (a bit) more performance if you are OK with some minor output variance.
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -61,7 +61,7 @@ static void compute_partition_averages_rgb(\n// For 2 partitions scan results for partition 0, compute partition 1\nelse if (partition_count == 2)\n{\n- vfloat4 pp_avg_rgb[3] {};\n+ vfloatacc pp_avg_rgb[3] {};\nvint lane_id = vint::lane_id();\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -97,7 +97,7 @@ static void compute_partition_averages_rgb(\n// For 3 partitions scan results for partition 0/1, compute partition 2\nelse if (partition_count == 3)\n{\n- vfloat4 pp_avg_rgb[2][3] {};\n+ vfloatacc pp_avg_rgb[2][3] {};\nvint lane_id = vint::lane_id();\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -142,7 +142,7 @@ static void compute_partition_averages_rgb(\nelse\n{\n// For 4 partitions scan results for partition 0/1/2, compute partition 3\n- vfloat4 pp_avg_rgb[3][3] {};\n+ vfloatacc pp_avg_rgb[3][3] {};\nvint lane_id = vint::lane_id();\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -765,8 +765,8 @@ void compute_error_squared_rgba(\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n- uncor_error = 0.0f;\n- samec_error = 0.0f;\n+ vfloatacc uncor_errorsumv = vfloatacc::zero();\n+ vfloatacc samec_errorsumv = vfloatacc::zero();\nfor (unsigned int partition = 0; partition < partition_count; partition++)\n{\n@@ -804,11 +804,9 @@ void compute_error_squared_rgba(\nvfloat uncor_loparamv(1e10f);\nvfloat uncor_hiparamv(-1e10f);\n- vfloat4 uncor_errorsumv = vfloat4::zero();\nvfloat samec_loparamv(1e10f);\nvfloat samec_hiparamv(-1e10f);\n- vfloat4 samec_errorsumv = vfloat4::zero();\nvfloat ew_r(blk.channel_weight.lane<0>());\nvfloat ew_g(blk.channel_weight.lane<1>());\n@@ -883,10 +881,6 @@ void compute_error_squared_rgba(\nsamec_loparam = hmin_s(samec_loparamv);\nsamec_hiparam = hmax_s(samec_hiparamv);\n- // Resolve the final scalar accumulator sum\n- haccumulate(uncor_error, uncor_errorsumv);\n- haccumulate(samec_error, samec_errorsumv);\n-\nfloat uncor_linelen = uncor_hiparam - uncor_loparam;\nfloat samec_linelen = samec_hiparam - samec_loparam;\n@@ -894,6 +888,9 @@ void compute_error_squared_rgba(\nuncor_lengths[partition] = astc::max(uncor_linelen, 1e-7f);\nsamec_lengths[partition] = astc::max(samec_linelen, 1e-7f);\n}\n+\n+ uncor_error = hadd_s(uncor_errorsumv);\n+ samec_error = hadd_s(samec_errorsumv);\n}\n/* See header for documentation. 
*/\n@@ -907,8 +904,8 @@ void compute_error_squared_rgb(\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n- uncor_error = 0.0f;\n- samec_error = 0.0f;\n+ vfloatacc uncor_errorsumv = vfloatacc::zero();\n+ vfloatacc samec_errorsumv = vfloatacc::zero();\nfor (unsigned int partition = 0; partition < partition_count; partition++)\n{\n@@ -947,11 +944,9 @@ void compute_error_squared_rgb(\nvfloat uncor_loparamv(1e10f);\nvfloat uncor_hiparamv(-1e10f);\n- vfloat4 uncor_errorsumv = vfloat4::zero();\nvfloat samec_loparamv(1e10f);\nvfloat samec_hiparamv(-1e10f);\n- vfloat4 samec_errorsumv = vfloat4::zero();\nvfloat ew_r(blk.channel_weight.lane<0>());\nvfloat ew_g(blk.channel_weight.lane<1>());\n@@ -1017,10 +1012,6 @@ void compute_error_squared_rgb(\nsamec_loparam = hmin_s(samec_loparamv);\nsamec_hiparam = hmax_s(samec_hiparamv);\n- // Resolve the final scalar accumulator sum\n- haccumulate(uncor_error, uncor_errorsumv);\n- haccumulate(samec_error, samec_errorsumv);\n-\nfloat uncor_linelen = uncor_hiparam - uncor_loparam;\nfloat samec_linelen = samec_hiparam - samec_loparam;\n@@ -1028,6 +1019,9 @@ void compute_error_squared_rgb(\npl.uncor_line_len = astc::max(uncor_linelen, 1e-7f);\npl.samec_line_len = astc::max(samec_linelen, 1e-7f);\n}\n+\n+ uncor_error = hadd_s(uncor_errorsumv);\n+ samec_error = hadd_s(samec_errorsumv);\n}\n#endif\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -559,7 +559,7 @@ float compute_symbolic_block_difference_1plane_1partition(\n}\n// Unpack and compute error for each texel in the partition\n- vfloat4 summav = vfloat4::zero();\n+ vfloatacc summav = vfloatacc::zero();\nvint lane_id = vint::lane_id();\nvint srgb_scale(config.profile == ASTCENC_PRF_LDR_SRGB ? 257 : 1);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -618,7 +618,7 @@ float compute_error_of_weight_set_1plane(\nconst decimation_info& di,\nconst float* dec_weight_quant_uvalue\n) {\n- vfloat4 error_summav = vfloat4::zero();\n+ vfloatacc error_summav = vfloatacc::zero();\nfloat error_summa = 0.0f;\nunsigned int texel_count = di.texel_count;\n@@ -673,9 +673,7 @@ float compute_error_of_weight_set_1plane(\n}\n// Resolve the final scalar accumulator sum\n- haccumulate(error_summa, error_summav);\n-\n- return error_summa;\n+ return error_summa = hadd_s(error_summav);\n}\n/* See header for documentation. */\n@@ -686,8 +684,7 @@ float compute_error_of_weight_set_2planes(\nconst float* dec_weight_quant_uvalue_plane1,\nconst float* dec_weight_quant_uvalue_plane2\n) {\n- vfloat4 error_summav = vfloat4::zero();\n- float error_summa = 0.0f;\n+ vfloatacc error_summav = vfloatacc::zero();\nunsigned int texel_count = di.texel_count;\n// Process SIMD-width chunks, safe to over-fetch - the extra space is zero initialized\n@@ -768,9 +765,7 @@ float compute_error_of_weight_set_2planes(\n}\n// Resolve the final scalar accumulator sum\n- haccumulate(error_summa, error_summav);\n-\n- return error_summa;\n+ return hadd_s(error_summav);\n}\n/* See header for documentation. 
*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -89,10 +89,10 @@ static void compute_error_squared_rgb_single_partition(\nconst uint8_t* texel_indexes = pi.texels_of_partition[partition_index];\npromise(texel_count > 0);\n- vfloat4 a_drop_errv = vfloat4::zero();\n+ vfloatacc a_drop_errv = vfloatacc::zero();\nvfloat default_a(blk.get_default_alpha());\n- vfloat4 uncor_errv = vfloat4::zero();\n+ vfloatacc uncor_errv = vfloatacc::zero();\nvfloat uncor_bs0(uncor_pline.bs.lane<0>());\nvfloat uncor_bs1(uncor_pline.bs.lane<1>());\nvfloat uncor_bs2(uncor_pline.bs.lane<2>());\n@@ -101,12 +101,12 @@ static void compute_error_squared_rgb_single_partition(\nvfloat uncor_amod1(uncor_pline.amod.lane<1>());\nvfloat uncor_amod2(uncor_pline.amod.lane<2>());\n- vfloat4 samec_errv = vfloat4::zero();\n+ vfloatacc samec_errv = vfloatacc::zero();\nvfloat samec_bs0(samec_pline.bs.lane<0>());\nvfloat samec_bs1(samec_pline.bs.lane<1>());\nvfloat samec_bs2(samec_pline.bs.lane<2>());\n- vfloat4 rgbl_errv = vfloat4::zero();\n+ vfloatacc rgbl_errv = vfloatacc::zero();\nvfloat rgbl_bs0(rgbl_pline.bs.lane<0>());\nvfloat rgbl_bs1(rgbl_pline.bs.lane<1>());\nvfloat rgbl_bs2(rgbl_pline.bs.lane<2>());\n@@ -115,7 +115,7 @@ static void compute_error_squared_rgb_single_partition(\nvfloat rgbl_amod1(rgbl_pline.amod.lane<1>());\nvfloat rgbl_amod2(rgbl_pline.amod.lane<2>());\n- vfloat4 l_errv = vfloat4::zero();\n+ vfloatacc l_errv = vfloatacc::zero();\nvfloat l_bs0(l_pline.bs.lane<0>());\nvfloat l_bs1(l_pline.bs.lane<1>());\nvfloat l_bs2(l_pline.bs.lane<2>());\n@@ -200,7 +200,7 @@ static void compute_error_squared_rgb_single_partition(\nhaccumulate(l_errv, error, mask);\n}\n- a_drop_err = hadd_s(a_drop_errv * ews.lane<3>());\n+ a_drop_err = hadd_s(a_drop_errv) * ews.lane<3>();\nuncor_err = hadd_s(uncor_errv);\nsamec_err = hadd_s(samec_errv);\nrgbl_err = hadd_s(rgbl_errv);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib.h", "new_path": "Source/astcenc_vecmathlib.h", "diff": "#define ASTCENC_SIMD_WIDTH 8\nusing vfloat = vfloat8;\n+ using vfloatacc = vfloat4; // Use vfloat8 for fast but non-invariant accumulators\nusing vint = vint8;\nusing vmask = vmask8;\n#define ASTCENC_SIMD_WIDTH 4\nusing vfloat = vfloat4;\n+ using vfloatacc = vfloat4;\nusing vint = vint4;\nusing vmask = vmask4;\n#define ASTCENC_SIMD_WIDTH 4\nusing vfloat = vfloat4;\n+ using vfloatacc = vfloat4;\nusing vint = vint4;\nusing vmask = vmask4;\n#define ASTCENC_SIMD_WIDTH 4\nusing vfloat = vfloat4;\n+ using vfloatacc = vfloat4;\nusing vint = vint4;\nusing vmask = vmask4;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -883,13 +883,12 @@ ASTCENC_SIMD_INLINE vfloat8 select(vfloat8 a, vfloat8 b, vmask8 cond)\n}\n/**\n- * @brief Accumulate the full horizontal sum of a vector.\n+ * @brief Accumulate lane-wise sums for a vector, folded 4-wide.\n+ *\n+ * This is invariant with 4-wide implementations.\n*/\n-ASTCENC_SIMD_INLINE void haccumulate(float& accum, vfloat8 a)\n+ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a)\n{\n- // Two sequential 4-wide accumulates gives invariance with 4-wide code.\n- // Note that this approach gives higher error in the sum; adding the two\n- // smaller numbers together first would be more accurate.\nvfloat4 lo(_mm256_extractf128_ps(a.m, 0));\nhaccumulate(accum, lo);\n@@ -898,35 +897,35 @@ 
ASTCENC_SIMD_INLINE void haccumulate(float& accum, vfloat8 a)\n}\n/**\n- * @brief Accumulate lane-wise sums for a vector, folded 4-wide.\n+ * @brief Accumulate lane-wise sums for a vector.\n+ *\n+ * This is NOT invariant with 4-wide implementations.\n*/\n-ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a)\n+ASTCENC_SIMD_INLINE void haccumulate(vfloat8& accum, vfloat8 a)\n{\n- // Two sequential 4-wide accumulates gives invariance with 4-wide code.\n- // Note that this approach gives higher error in the sum; adding the two\n- // smaller numbers together first would be more accurate.\n- vfloat4 lo(_mm256_extractf128_ps(a.m, 0));\n- haccumulate(accum, lo);\n-\n- vfloat4 hi(_mm256_extractf128_ps(a.m, 1));\n- haccumulate(accum, hi);\n+ accum += a;\n}\n/**\n* @brief Accumulate masked lane-wise sums for a vector, folded 4-wide.\n+ *\n+ * This is invariant with 4-wide implementations.\n*/\nASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a, vmask8 m)\n{\na = select(vfloat8::zero(), a, m);\n+ haccumulate(accum, a);\n+}\n- // Two sequential 4-wide accumulates gives invariance with 4-wide code.\n- // Note that this approach gives higher error in the sum; adding the two\n- // smaller numbers together first would be more accurate.\n- vfloat4 lo(_mm256_extractf128_ps(a.m, 0));\n- haccumulate(accum, lo);\n-\n- vfloat4 hi(_mm256_extractf128_ps(a.m, 1));\n- haccumulate(accum, hi);\n+/**\n+ * @brief Accumulate masked lane-wise sums for a vector.\n+ *\n+ * This is NOT invariant with 4-wide implementations.\n+ */\n+ASTCENC_SIMD_INLINE void haccumulate(vfloat8& accum, vfloat8 a, vmask8 m)\n+{\n+ a = select(vfloat8::zero(), a, m);\n+ haccumulate(accum, a);\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -270,14 +270,6 @@ ASTCENC_SIMD_INLINE float hmax_s(vfloat4 a)\nreturn hmax(a).lane<0>();\n}\n-/**\n- * @brief Accumulate the full horizontal sum of a vector.\n- */\n-ASTCENC_SIMD_INLINE void haccumulate(float& accum, vfloat4 a)\n-{\n- accum += hadd_s(a);\n-}\n-\n/**\n* @brief Accumulate lane-wise sums for a vector.\n*/\n@@ -292,7 +284,7 @@ ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat4 a)\nASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat4 a, vmask4 m)\n{\na = select(vfloat4::zero(), a, m);\n- accum = accum + a;\n+ haccumulate(accum, a);\n}\n/**\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add explicit VLA vfloatacc type for accumulators By default this is a vfloat4, as this allows invariant output across 4- and 8-wide implementations. However you can now just change it in the source to a vfloat8 for (a bit) more performance if you are OK with some minor output variance.
61,745
24.04.2022 22:11:40
-3,600
d705270606161e2c5ec5724389c2c9aff11122e8
Add undocumented NO_INVARIANCE build The purpose of this build is to let us sanity check how much invariance is actually costing us. The current non-invariant build is 1% faster for AVX2. This includes: * Wider VLA accumulators. * Use of SSE 4.1 dot product.
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib.h", "new_path": "Source/astcenc_vecmathlib.h", "diff": "#define ASTCENC_SIMD_WIDTH 8\nusing vfloat = vfloat8;\n- using vfloatacc = vfloat4; // Use vfloat8 for fast but non-invariant accumulators\n+\n+ #if defined(ASTCENC_NO_INVARIANCE)\n+ using vfloatacc = vfloat8;\n+ #else\n+ using vfloatacc = vfloat4;\n+ #endif\n+\nusing vint = vint8;\nusing vmask = vmask8;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -295,6 +295,8 @@ ASTCENC_SIMD_INLINE float hadd_rgb_s(vfloat4 a)\nreturn a.lane<0>() + a.lane<1>() + a.lane<2>();\n}\n+#if !defined(ASTCENC_USE_NATIVE_DOT_PRODUCT)\n+\n/**\n* @brief Return the dot product for the full 4 lanes, returning scalar.\n*/\n@@ -332,13 +334,7 @@ ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)\nreturn vfloat4(d3, d3, d3, 0.0f);\n}\n-/**\n- * @brief Generate a reciprocal of a vector.\n- */\n-ASTCENC_SIMD_INLINE vfloat4 recip(vfloat4 b)\n-{\n- return 1.0f / b;\n-}\n+#endif\n/**\n* @brief Debug function to print a vector of ints.\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1017,4 +1017,42 @@ ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 v)\nreturn vfloat4(_mm_castsi128_ps(v.m));\n}\n+#if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)\n+\n+#define ASTCENC_USE_NATIVE_DOT_PRODUCT 1\n+\n+/**\n+ * @brief Return the dot product for the full 4 lanes, returning scalar.\n+ */\n+ASTCENC_SIMD_INLINE float dot_s(vfloat4 a, vfloat4 b)\n+{\n+ return _mm_cvtss_f32(_mm_dp_ps(a.m, b.m, 0xFF));\n+}\n+\n+/**\n+ * @brief Return the dot product for the full 4 lanes, returning vector.\n+ */\n+ASTCENC_SIMD_INLINE vfloat4 dot(vfloat4 a, vfloat4 b)\n+{\n+ return vfloat4(_mm_dp_ps(a.m, b.m, 0xFF));\n+}\n+\n+/**\n+ * @brief Return the dot product for the bottom 3 lanes, returning scalar.\n+ */\n+ASTCENC_SIMD_INLINE float dot3_s(vfloat4 a, vfloat4 b)\n+{\n+ return _mm_cvtss_f32(_mm_dp_ps(a.m, b.m, 0x77));\n+}\n+\n+/**\n+ * @brief Return the dot product for the bottom 3 lanes, returning vector.\n+ */\n+ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)\n+{\n+ return vfloat4(_mm_dp_ps(a.m, b.m, 0x77));\n+}\n+\n+#endif // #if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)\n+\n#endif // #ifndef ASTC_VECMATHLIB_SSE_4_H_INCLUDED\n" }, { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -153,6 +153,12 @@ macro(astcenc_set_properties NAME)\n$<$<CXX_COMPILER_ID:${CLANG_LIKE}>:-fsanitize=address>)\nendif()\n+ if(${ENABLE_NO_INVARIANCE})\n+ target_compile_definitions(${NAME}\n+ PRIVATE\n+ ASTCENC_NO_INVARIANCE=1)\n+ endif()\n+\nif(${CLI})\n# Enable LTO on release builds\nset_property(TARGET ${NAME}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add undocumented NO_INVARIANCE build The purpose of this build is to let us sanity check how much invariance is actually costing us. The current non-invariant build is 1% faster for AVX2. This includes: * Wider VLA accumulators. * Use of SSE 4.1 dot product.
61,745
24.04.2022 22:33:24
-3,600
9a0e64ccc0f0ddd5cced93ed1dd85905173868ad
Avoid divisor
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -473,7 +473,7 @@ void compute_avgs_and_dirs_3_comp(\nvfloat4 partition_averages[BLOCK_MAX_PARTITIONS];\ncompute_partition_averages_rgba(pi, blk, partition_averages);\n- float texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3.0f;\n+ float texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>());\nconst float* data_vr = blk.data_r;\nconst float* data_vg = blk.data_g;\n@@ -482,7 +482,7 @@ void compute_avgs_and_dirs_3_comp(\n// TODO: Data-driven permute would be useful to avoid this ...\nif (omitted_component == 0)\n{\n- texel_weight = hadd_s(blk.channel_weight.swz<1, 2, 3>()) / 3.0f;\n+ texel_weight = hadd_s(blk.channel_weight.swz<1, 2, 3>());\npartition_averages[0] = partition_averages[0].swz<1, 2, 3>();\npartition_averages[1] = partition_averages[1].swz<1, 2, 3>();\n@@ -495,7 +495,7 @@ void compute_avgs_and_dirs_3_comp(\n}\nelse if (omitted_component == 1)\n{\n- texel_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>()) / 3.0f;\n+ texel_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>());\npartition_averages[0] = partition_averages[0].swz<0, 2, 3>();\npartition_averages[1] = partition_averages[1].swz<0, 2, 3>();\n@@ -507,7 +507,7 @@ void compute_avgs_and_dirs_3_comp(\n}\nelse if (omitted_component == 2)\n{\n- texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>()) / 3.0f;\n+ texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>());\npartition_averages[0] = partition_averages[0].swz<0, 1, 3>();\npartition_averages[1] = partition_averages[1].swz<0, 1, 3>();\n@@ -524,6 +524,8 @@ void compute_avgs_and_dirs_3_comp(\npartition_averages[3] = partition_averages[3].swz<0, 1, 2>();\n}\n+ texel_weight = texel_weight * (1.0f / 3.0f);\n+\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n@@ -589,7 +591,7 @@ void compute_avgs_and_dirs_3_comp_rgb(\nconst image_block& blk,\npartition_metrics pm[BLOCK_MAX_PARTITIONS]\n) {\n- float texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3;\n+ float texel_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) * (1.0f / 3.0f);\nunsigned int partition_count = pi.partition_count;\npromise(partition_count > 0);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -302,21 +302,21 @@ static void compute_ideal_colors_and_weights_3_comp(\nconst float* data_vb = nullptr;\nif (omitted_component == 0)\n{\n- error_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3.0f;\n+ error_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>());\ndata_vr = blk.data_g;\ndata_vg = blk.data_b;\ndata_vb = blk.data_a;\n}\nelse if (omitted_component == 1)\n{\n- error_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>()) / 3.0f;\n+ error_weight = hadd_s(blk.channel_weight.swz<0, 2, 3>());\ndata_vr = blk.data_r;\ndata_vg = blk.data_b;\ndata_vb = blk.data_a;\n}\nelse if (omitted_component == 2)\n{\n- error_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>()) / 3.0f;\n+ error_weight = hadd_s(blk.channel_weight.swz<0, 1, 3>());\ndata_vr = blk.data_r;\ndata_vg = blk.data_g;\ndata_vb = blk.data_a;\n@@ -325,12 +325,14 @@ static void compute_ideal_colors_and_weights_3_comp(\n{\nassert(omitted_component == 3);\n- error_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>()) / 3.0f;\n+ error_weight = hadd_s(blk.channel_weight.swz<0, 1, 2>());\ndata_vr = blk.data_r;\ndata_vg = blk.data_g;\ndata_vb = 
blk.data_b;\n}\n+ error_weight = error_weight * (1.0f / 3.0f);\n+\nif (omitted_component == 3)\n{\ncompute_avgs_and_dirs_3_comp_rgb(pi, blk, pms);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Avoid divisor
61,757
25.04.2022 11:46:48
-3,600
7884560aacdb0605353f9fd34ae1d0b8020d6efa
Remove unused 'iters' variable
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_color_quantize.cpp", "new_path": "Source/astcenc_color_quantize.cpp", "diff": "@@ -123,7 +123,6 @@ static void quantize_rgb(\nint ri0b, gi0b, bi0b, ri1b, gi1b, bi1b;\nfloat rgb0_addon = 0.5f;\nfloat rgb1_addon = 0.5f;\n- int iters = 0;\ndo\n{\nri0 = quant_color_clamp(quant_level, astc::flt2int_rd(r0 + rgb0_addon));\n@@ -142,7 +141,6 @@ static void quantize_rgb(\nrgb0_addon -= 0.2f;\nrgb1_addon += 0.2f;\n- iters++;\n} while (ri0b + gi0b + bi0b > ri1b + gi1b + bi1b);\noutput[0] = static_cast<uint8_t>(ri0);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove unused 'iters' variable (#324)
61,745
25.04.2022 11:59:33
-3,600
1b8eb55a610a8919b0bf858f5fcbeda63f800c36
Move Wuffs PNG loader wrapper into image_external
[ { "change_type": "MODIFY", "old_path": "Source/astcenccli_image_external.cpp", "new_path": "Source/astcenccli_image_external.cpp", "diff": "#include <cstdlib>\n#include <cstdio>\n+#include <fstream>\n+#include <vector>\n+\n+#include \"astcenccli_internal.h\"\n// Configure the STB image imagewrite library build.\n#define STB_IMAGE_IMPLEMENTATION\n@@ -65,3 +69,100 @@ static void astcenc_runtime_assert(bool condition)\n#include \"stb_image.h\"\n#include \"stb_image_write.h\"\n#include \"tinyexr.h\"\n+\n+/**\n+ * @brief Load an image using Wuffs to provide the loader.\n+ *\n+ * @param filename The name of the file to load.\n+ * @param y_flip Should the image be vertically flipped?\n+ * @param[out] is_hdr Is this an HDR image load?\n+ * @param[out] component_count The number of components in the data.\n+ *\n+ * @return The loaded image data in a canonical 4 channel format, or @c nullptr on error.\n+ */\n+astcenc_image* load_png_with_wuffs(\n+ const char* filename,\n+ bool y_flip,\n+ bool& is_hdr,\n+ unsigned int& component_count\n+) {\n+ is_hdr = false;\n+ component_count = 4;\n+\n+ std::ifstream file(filename, std::ios::binary | std::ios::ate);\n+ std::streamsize size = file.tellg();\n+ file.seekg(0, std::ios::beg);\n+\n+ std::vector<uint8_t> buffer(size);\n+ file.read((char*)buffer.data(), size);\n+\n+ wuffs_png__decoder *dec = wuffs_png__decoder__alloc();\n+ if (!dec)\n+ {\n+ return nullptr;\n+ }\n+\n+ wuffs_base__image_config ic;\n+ wuffs_base__io_buffer src = wuffs_base__ptr_u8__reader(buffer.data(), size, true);\n+ wuffs_base__status status = wuffs_png__decoder__decode_image_config(dec, &ic, &src);\n+ if (status.repr)\n+ {\n+ return nullptr;\n+ }\n+\n+ uint32_t dim_x = wuffs_base__pixel_config__width(&ic.pixcfg);\n+ uint32_t dim_y = wuffs_base__pixel_config__height(&ic.pixcfg);\n+ size_t num_pixels = dim_x * dim_y;\n+ if (num_pixels > (SIZE_MAX / 4))\n+ {\n+ return nullptr;\n+ }\n+\n+ // Override the image's native pixel format to be RGBA_NONPREMUL\n+ wuffs_base__pixel_config__set(\n+ &ic.pixcfg,\n+ WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL,\n+ WUFFS_BASE__PIXEL_SUBSAMPLING__NONE,\n+ dim_x, dim_y);\n+\n+ // Configure the work buffer\n+ size_t workbuf_len = wuffs_png__decoder__workbuf_len(dec).max_incl;\n+ if (workbuf_len > SIZE_MAX)\n+ {\n+ return nullptr;\n+ }\n+\n+ wuffs_base__slice_u8 workbuf_slice = wuffs_base__make_slice_u8((uint8_t*)malloc(workbuf_len), workbuf_len);\n+ if (!workbuf_slice.ptr)\n+ {\n+ return nullptr;\n+ }\n+\n+ wuffs_base__slice_u8 pixbuf_slice = wuffs_base__make_slice_u8((uint8_t*)malloc(num_pixels * 4), num_pixels * 4);\n+ if (!pixbuf_slice.ptr)\n+ {\n+ return nullptr;\n+ }\n+\n+ wuffs_base__pixel_buffer pb;\n+ status = wuffs_base__pixel_buffer__set_from_slice(&pb, &ic.pixcfg, pixbuf_slice);\n+ if (status.repr)\n+ {\n+ return nullptr;\n+ }\n+\n+ // Decode the pixels\n+ status = wuffs_png__decoder__decode_frame(dec, &pb, &src, WUFFS_BASE__PIXEL_BLEND__SRC, workbuf_slice, NULL);\n+ if (status.repr)\n+ {\n+ return nullptr;\n+ }\n+\n+ astcenc_image* img = astc_img_from_unorm8x4_array(pixbuf_slice.ptr, dim_x, dim_y, y_flip);\n+\n+ free(pixbuf_slice.ptr);\n+ free(workbuf_slice.ptr);\n+ free(dec);\n+\n+ return img;\n+}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenccli_image_load_store.cpp", "new_path": "Source/astcenccli_image_load_store.cpp", "diff": "#include <cstdlib>\n#include <cstring>\n#include <fstream>\n-#include <vector>\n#include \"astcenccli_internal.h\"\n#include \"stb_image.h\"\n#include \"stb_image_write.h\"\n#include \"tinyexr.h\"\n-#include 
\"wuffs-v0.3.c\"\n/* ============================================================================\nImage load and store through the stb_iamge and tinyexr libraries\n@@ -74,103 +72,6 @@ static astcenc_image* load_image_with_tinyexr(\nreturn res_img;\n}\n-/**\n- * @brief Load an image using Wuffs to provide the loader.\n- *\n- * @param filename The name of the file to load.\n- * @param y_flip Should the image be vertically flipped?\n- * @param[out] is_hdr Is this an HDR image load?\n- * @param[out] component_count The number of components in the data.\n- *\n- * @return The loaded image data in a canonical 4 channel format, or @c nullptr on error.\n- */\n-static astcenc_image* load_png_with_wuffs(\n- const char* filename,\n- bool y_flip,\n- bool& is_hdr,\n- unsigned int& component_count\n-) {\n- is_hdr = false;\n- component_count = 4;\n-\n- std::ifstream file(filename, std::ios::binary | std::ios::ate);\n- std::streamsize size = file.tellg();\n- file.seekg(0, std::ios::beg);\n-\n- std::vector<uint8_t> buffer(size);\n- file.read((char*)buffer.data(), size);\n-\n- wuffs_png__decoder *dec = wuffs_png__decoder__alloc();\n- if (!dec)\n- {\n- return nullptr;\n- }\n-\n- wuffs_base__image_config ic;\n- wuffs_base__io_buffer src = wuffs_base__ptr_u8__reader(buffer.data(), size, true);\n- wuffs_base__status status = wuffs_png__decoder__decode_image_config(dec, &ic, &src);\n- if (status.repr)\n- {\n- return nullptr;\n- }\n-\n- uint32_t dim_x = wuffs_base__pixel_config__width(&ic.pixcfg);\n- uint32_t dim_y = wuffs_base__pixel_config__height(&ic.pixcfg);\n- size_t num_pixels = dim_x * dim_y;\n- if (num_pixels > (SIZE_MAX / 4))\n- {\n- return nullptr;\n- }\n-\n- // Override the image's native pixel format to be RGBA_NONPREMUL\n- wuffs_base__pixel_config__set(\n- &ic.pixcfg,\n- WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL,\n- WUFFS_BASE__PIXEL_SUBSAMPLING__NONE,\n- dim_x, dim_y);\n-\n- // Configure the work buffer\n- size_t workbuf_len = wuffs_png__decoder__workbuf_len(dec).max_incl;\n- if (workbuf_len > SIZE_MAX)\n- {\n- return nullptr;\n- }\n-\n- wuffs_base__slice_u8 workbuf_slice = wuffs_base__make_slice_u8((uint8_t*)malloc(workbuf_len), workbuf_len);\n- if (!workbuf_slice.ptr)\n- {\n- return nullptr;\n- }\n-\n- wuffs_base__slice_u8 pixbuf_slice = wuffs_base__make_slice_u8((uint8_t*)malloc(num_pixels * 4), num_pixels * 4);\n- if (!pixbuf_slice.ptr)\n- {\n- return nullptr;\n- }\n-\n- wuffs_base__pixel_buffer pb;\n- status = wuffs_base__pixel_buffer__set_from_slice(&pb, &ic.pixcfg, pixbuf_slice);\n- if (status.repr)\n- {\n- return nullptr;\n- }\n-\n- // Decode the pixels\n- status = wuffs_png__decoder__decode_frame(dec, &pb, &src, WUFFS_BASE__PIXEL_BLEND__SRC, workbuf_slice, NULL);\n- if (status.repr)\n- {\n- return nullptr;\n- }\n-\n- astcenc_image* img = astc_img_from_unorm8x4_array(pixbuf_slice.ptr, dim_x, dim_y, y_flip);\n-\n- free(pixbuf_slice.ptr);\n- free(workbuf_slice.ptr);\n- free(dec);\n-\n- return img;\n-}\n-\n/**\n* @brief Load an image using STBImage to provide the loader.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenccli_internal.h", "new_path": "Source/astcenccli_internal.h", "diff": "@@ -106,6 +106,22 @@ astcenc_image* load_ncimage(\nbool& is_hdr,\nunsigned int& component_count);\n+/**\n+ * @brief Load uncompressed PNG image.\n+ *\n+ * @param filename The file path on disk.\n+ * @param y_flip Should this image be Y flipped?\n+ * @param[out] is_hdr Is the loaded image HDR?\n+ * @param[out] component_count The number of components in the loaded image.\n+ *\n+ * @return The astc 
image file, or nullptr on error.\n+ */\n+astcenc_image* load_png_with_wuffs(\n+ const char* filename,\n+ bool y_flip,\n+ bool& is_hdr,\n+ unsigned int& component_count);\n+\n/**\n* @brief Save an uncompressed image.\n*\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Move Wuffs PNG loader wrapper into image_external
61,745
25.04.2022 13:53:59
-3,600
3f23371684eb96db3cdd008b2817fb91e3411fcd
Remove obsolete scalar haccumulate tests
[ { "change_type": "MODIFY", "old_path": "Source/UnitTest/test_simd.cpp", "new_path": "Source/UnitTest/test_simd.cpp", "diff": "@@ -855,26 +855,6 @@ TEST(vfloat4, hadd_rgb_s)\nEXPECT_NEAR(r, sum, 0.005f);\n}\n-/** @brief Test vfloat4 haccumulate. */\n-TEST(vfloat4, haccumulate)\n-{\n- // These values will fail to add to the same value if reassociated\n- float a0 = 141.2540435791015625f;\n- float a1 = 5345345.5000000000000000f;\n- float a2 = 234234.7031250000000000f;\n- float a3 = 124353454080.0000000000000000f;\n-\n- vfloat4 a(a0, a1, a2, a3);\n- float ra = 0.0f;\n- haccumulate(ra, a);\n-\n- // Test that reassociation causes a failure with the numbers we chose\n- EXPECT_NE(ra, a0 + a1 + a2 + a3);\n-\n- // Test that the sum works, for the association pattern we want used\n- EXPECT_EQ(ra, (a0 + a2) + (a1 + a3));\n-}\n-\n/** @brief Test vfloat4 sqrt. */\nTEST(vfloat4, sqrt)\n{\n@@ -2501,33 +2481,6 @@ TEST(vfloat8, hadd_s)\nEXPECT_NEAR(r, sum, 0.005f);\n}\n-/** @brief Test vfloat8 haccumulate. */\n-TEST(vfloat8, haccumulate)\n-{\n- // These values will fail to add to the same value if reassociated\n- float l0 = 141.2540435791015625f;\n- float l1 = 5345345.5000000000000000f;\n- float l2 = 234234.7031250000000000f;\n- float l3 = 124353454080.0000000000000000f;\n-\n- vfloat8 a1(l0, l1, l2, l3, l0, l1, l2, l3);\n- float r1 = 0.0f;\n- haccumulate(r1, a1);\n-\n- vfloat4 a2(l0, l1, l2, l3);\n- vfloat4 b2(l0, l1, l2, l3);\n- float r2 = 0.0f;\n- haccumulate(r2, a2);\n- haccumulate(r2, b2);\n-\n- // Test that reassociations cause a failure with the numbers we chose\n- EXPECT_NE(r1, l0 + l1 + l2 + l3 + l0 + l1 + l2 + l3);\n- EXPECT_NE(r1, (l0 + l1 + l2 + l3) + (l0 + l1 + l2 + l3));\n-\n- // Test that the sum works, for the association pattern we want used\n- EXPECT_EQ(r1, r2);\n-}\n-\n/** @brief Test vfloat8 sqrt. */\nTEST(vfloat8, sqrt)\n{\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove obsolete scalar haccumulate tests
61,745
25.04.2022 15:12:53
-3,600
d6a9e500f7e07b1721d5d9c0afb47d235688b933
Suppress Wuffs compiler warnings
[ { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -265,7 +265,7 @@ if(CMAKE_CXX_COMPILER_ID MATCHES \"GNU|Clang\")\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-tautological-type-limit-compare>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-cast-qual>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-reserved-identifier>\"\n- \" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-missing-prototypes>\"\n+ \" $<$<CXX_COMPILER_ID:Clang>: -Wno-missing-prototypes>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-suggest-override>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-used-but-marked-unused>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-noexcept-type>\")\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Suppress Wuffs compiler warnings
61,745
25.04.2022 15:34:52
-3,600
23ae45a2a757068a331c634ec951f05178d4c8a7
Handle missing file on Wuffs load path
[ { "change_type": "MODIFY", "old_path": "Source/astcenccli_image_external.cpp", "new_path": "Source/astcenccli_image_external.cpp", "diff": "@@ -90,6 +90,12 @@ astcenc_image* load_png_with_wuffs(\ncomponent_count = 4;\nstd::ifstream file(filename, std::ios::binary | std::ios::ate);\n+ if (!file)\n+ {\n+ printf(\"ERROR: Failed to load image %s (can't fopen)\\n\", filename);\n+ return nullptr;\n+ }\n+\nstd::streamsize size = file.tellg();\nfile.seekg(0, std::ios::beg);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Handle missing file on Wuffs load path
61,745
26.04.2022 20:18:07
-3,600
1f21e575a4ef0732b878d3460d69ae5f480faaf7
Use vector grayscale detection
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_image.cpp", "new_path": "Source/astcenc_image.cpp", "diff": "@@ -176,7 +176,7 @@ void fetch_image_block(\nvfloat4 data_mean(0.0f);\nvfloat4 data_mean_scale(1.0f / static_cast<float>(bsd.texel_count));\nvfloat4 data_max(-1e38f);\n- bool grayscale = true;\n+ vmask4 grayscalev(true);\n// This works because we impose the same choice everywhere during encode\nuint8_t rgb_lns = (decode_mode == ASTCENC_PRF_HDR) ||\n@@ -230,10 +230,7 @@ void fetch_image_block(\ndata_mean += datav * data_mean_scale;\ndata_max = max(data_max, datav);\n- if (grayscale && (datav.lane<0>() != datav.lane<1>() || datav.lane<0>() != datav.lane<2>()))\n- {\n- grayscale = false;\n- }\n+ grayscalev = grayscalev & (datav.swz<0,0,0,0>() == datav.swz<1,1,2,2>());\nblk.data_r[idx] = datav.lane<0>();\nblk.data_g[idx] = datav.lane<1>();\n@@ -264,7 +261,7 @@ void fetch_image_block(\nblk.data_min = data_min;\nblk.data_mean = data_mean;\nblk.data_max = data_max;\n- blk.grayscale = grayscale;\n+ blk.grayscale = all(grayscalev);\n}\n/* See header for documentation. */\n@@ -291,7 +288,7 @@ void fetch_image_block_fast_ldr(\nvfloat4 data_min(1e38f);\nvfloat4 data_mean = vfloat4::zero();\nvfloat4 data_max(-1e38f);\n- bool grayscale = true;\n+ vmask4 grayscalev(true);\nint idx = 0;\nconst uint8_t* plane = static_cast<const uint8_t*>(img.data[0]);\n@@ -311,10 +308,7 @@ void fetch_image_block_fast_ldr(\ndata_mean += datav;\ndata_max = max(data_max, datav);\n- if (grayscale && (datav.lane<0>() != datav.lane<1>() || datav.lane<0>() != datav.lane<2>()))\n- {\n- grayscale = false;\n- }\n+ grayscalev = grayscalev & (datav.swz<0,0,0,0>() == datav.swz<1,1,2,2>());\nblk.data_r[idx] = datav.lane<0>();\nblk.data_g[idx] = datav.lane<1>();\n@@ -334,7 +328,7 @@ void fetch_image_block_fast_ldr(\nblk.data_min = data_min;\nblk.data_mean = data_mean / static_cast<float>(bsd.texel_count);\nblk.data_max = data_max;\n- blk.grayscale = grayscale;\n+ blk.grayscale = all(grayscalev);\n}\n/* See header for documentation. */\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use vector grayscale detection
61,745
26.04.2022 21:35:18
-3,600
4ec71cd223308c1d3e979cf324ab70a0582d373a
Shrink some literal tables
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -54,11 +54,11 @@ static constexpr unsigned int SINCOS_STEPS { 64 };\nstatic_assert((ANGULAR_STEPS % ASTCENC_SIMD_WIDTH) == 0,\n\"ANGULAR_STEPS must be multiple of ASTCENC_SIMD_WIDTH\");\n-static unsigned int max_angular_steps_needed_for_quant_level[13];\n+static uint8_t max_angular_steps_needed_for_quant_level[13];\n// The next-to-last entry is supposed to have the value 33. This because the 32-weight mode leaves a\n// double-sized hole in the middle of the weight space, so we are better off matching 33 weights.\n-static const unsigned int quantization_steps_for_level[13] {\n+static const uint8_t quantization_steps_for_level[13] {\n2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 24, 33, 36\n};\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Shrink some literal tables
61,745
26.04.2022 21:51:42
-3,600
880438f3b2b7d255f8ca3160aea10d641a2adbb6
Add scalar float bit re-interpretation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_mathlib.h", "new_path": "Source/astcenc_mathlib.h", "diff": "@@ -323,6 +323,34 @@ static inline int flt2int_rd(float v)\nreturn (int)(v);\n}\n+/**\n+ * @brief SP float bit-interpreted as an integer.\n+ *\n+ * @param v The value to bitcast.\n+ *\n+ * @return The converted value.\n+ */\n+static inline int float_as_int(float v)\n+{\n+ union { int a; float b; } u;\n+ u.b = v;\n+ return u.a;\n+}\n+\n+/**\n+ * @brief Integer bit-interpreted as an SP float.\n+ *\n+ * @param v The value to bitcast.\n+ *\n+ * @return The converted value.\n+ */\n+static inline float int_as_float(int v)\n+{\n+ union { int a; float b; } u;\n+ u.a = v;\n+ return u.b;\n+}\n+\n/**\n* @brief Population bit count.\n*\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add scalar float bit re-interpretation
61,745
26.04.2022 22:33:12
-3,600
fd39346bde727919049d0f9753f3d72cd8d93d77
Use const reference to original binary data
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -1134,8 +1134,7 @@ astcenc_error astcenc_decompress_image(\nunsigned int offset = (((z * yblocks + y) * xblocks) + x) * 16;\nconst uint8_t* bp = data + offset;\n- // TODO: Shouldn't this just be a const reference rather than a copy?\n- physical_compressed_block pcb = *reinterpret_cast<const physical_compressed_block*>(bp);\n+ const physical_compressed_block& pcb = *reinterpret_cast<const physical_compressed_block*>(bp);\nsymbolic_compressed_block scb;\nphysical_to_symbolic(*ctx->bsd, pcb, scb);\n@@ -1175,8 +1174,7 @@ astcenc_error astcenc_get_block_info(\nreturn ASTCENC_ERR_BAD_CONTEXT;\n#else\n// Decode the compressed data into a symbolic form\n- // TODO: Shouldn't this be a const reference rather than a copy?\n- physical_compressed_block pcb = *reinterpret_cast<const physical_compressed_block*>(data);\n+ const physical_compressed_block&pcb = *reinterpret_cast<const physical_compressed_block*>(data);\nsymbolic_compressed_block scb;\nphysical_to_symbolic(*ctx->bsd, pcb, scb);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use const reference to original binary data
61,745
27.04.2022 19:57:00
-3,600
2891fc939c409a4eceded8d1b06cf74cf431f79d
Suppress Wuffs compiler warning
[ { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -269,7 +269,8 @@ if(CMAKE_CXX_COMPILER_ID MATCHES \"GNU|Clang\")\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-suggest-override>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-used-but-marked-unused>\"\n\" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-noexcept-type>\"\n- \" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-comma>\")\n+ \" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-comma>\"\n+ \" $<$<NOT:$<CXX_COMPILER_ID:MSVC>>: -Wno-c99-extensions>\")\nset_source_files_properties(astcenccli_image_external.cpp\nPROPERTIES\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Supress Wuffs compiler warning
61,745
27.04.2022 20:28:41
-3,600
a81cb23a416c826461917fecb71bb1cfbefb9227
Add NEON popcount implementation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -261,7 +261,7 @@ static astcenc_error validate_flags(\n) {\n// Flags field must not contain any unknown flag bits\nunsigned int exMask = ~ASTCENC_ALL_FLAGS;\n- if (astc::popcount(flags & exMask) != 0)\n+ if (popcount(flags & exMask) != 0)\n{\nreturn ASTCENC_ERR_BAD_FLAGS;\n}\n@@ -270,7 +270,7 @@ static astcenc_error validate_flags(\nexMask = ASTCENC_FLG_MAP_MASK\n| ASTCENC_FLG_MAP_NORMAL\n| ASTCENC_FLG_MAP_RGBM;\n- if (astc::popcount(flags & exMask) > 1)\n+ if (popcount(flags & exMask) > 1)\n{\nreturn ASTCENC_ERR_BAD_FLAGS;\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_find_best_partitioning.cpp", "new_path": "Source/astcenc_find_best_partitioning.cpp", "diff": "@@ -253,8 +253,8 @@ static inline unsigned int partition_mismatch2(\nconst uint64_t a[2],\nconst uint64_t b[2]\n) {\n- int v1 = astc::popcount(a[0] ^ b[0]) + astc::popcount(a[1] ^ b[1]);\n- int v2 = astc::popcount(a[0] ^ b[1]) + astc::popcount(a[1] ^ b[0]);\n+ int v1 = popcount(a[0] ^ b[0]) + popcount(a[1] ^ b[1]);\n+ int v2 = popcount(a[0] ^ b[1]) + popcount(a[1] ^ b[0]);\nreturn astc::min(v1, v2);\n}\n@@ -270,17 +270,17 @@ static inline unsigned int partition_mismatch3(\nconst uint64_t a[3],\nconst uint64_t b[3]\n) {\n- int p00 = astc::popcount(a[0] ^ b[0]);\n- int p01 = astc::popcount(a[0] ^ b[1]);\n- int p02 = astc::popcount(a[0] ^ b[2]);\n+ int p00 = popcount(a[0] ^ b[0]);\n+ int p01 = popcount(a[0] ^ b[1]);\n+ int p02 = popcount(a[0] ^ b[2]);\n- int p10 = astc::popcount(a[1] ^ b[0]);\n- int p11 = astc::popcount(a[1] ^ b[1]);\n- int p12 = astc::popcount(a[1] ^ b[2]);\n+ int p10 = popcount(a[1] ^ b[0]);\n+ int p11 = popcount(a[1] ^ b[1]);\n+ int p12 = popcount(a[1] ^ b[2]);\n- int p20 = astc::popcount(a[2] ^ b[0]);\n- int p21 = astc::popcount(a[2] ^ b[1]);\n- int p22 = astc::popcount(a[2] ^ b[2]);\n+ int p20 = popcount(a[2] ^ b[0]);\n+ int p21 = popcount(a[2] ^ b[1]);\n+ int p22 = popcount(a[2] ^ b[2]);\nint s0 = p11 + p22;\nint s1 = p12 + p21;\n@@ -309,25 +309,25 @@ static inline unsigned int partition_mismatch4(\nconst uint64_t a[4],\nconst uint64_t b[4]\n) {\n- int p00 = astc::popcount(a[0] ^ b[0]);\n- int p01 = astc::popcount(a[0] ^ b[1]);\n- int p02 = astc::popcount(a[0] ^ b[2]);\n- int p03 = astc::popcount(a[0] ^ b[3]);\n-\n- int p10 = astc::popcount(a[1] ^ b[0]);\n- int p11 = astc::popcount(a[1] ^ b[1]);\n- int p12 = astc::popcount(a[1] ^ b[2]);\n- int p13 = astc::popcount(a[1] ^ b[3]);\n-\n- int p20 = astc::popcount(a[2] ^ b[0]);\n- int p21 = astc::popcount(a[2] ^ b[1]);\n- int p22 = astc::popcount(a[2] ^ b[2]);\n- int p23 = astc::popcount(a[2] ^ b[3]);\n-\n- int p30 = astc::popcount(a[3] ^ b[0]);\n- int p31 = astc::popcount(a[3] ^ b[1]);\n- int p32 = astc::popcount(a[3] ^ b[2]);\n- int p33 = astc::popcount(a[3] ^ b[3]);\n+ int p00 = popcount(a[0] ^ b[0]);\n+ int p01 = popcount(a[0] ^ b[1]);\n+ int p02 = popcount(a[0] ^ b[2]);\n+ int p03 = popcount(a[0] ^ b[3]);\n+\n+ int p10 = popcount(a[1] ^ b[0]);\n+ int p11 = popcount(a[1] ^ b[1]);\n+ int p12 = popcount(a[1] ^ b[2]);\n+ int p13 = popcount(a[1] ^ b[3]);\n+\n+ int p20 = popcount(a[2] ^ b[0]);\n+ int p21 = popcount(a[2] ^ b[1]);\n+ int p22 = popcount(a[2] ^ b[2]);\n+ int p23 = popcount(a[2] ^ b[3]);\n+\n+ int p30 = popcount(a[3] ^ b[0]);\n+ int p31 = popcount(a[3] ^ b[1]);\n+ int p32 = popcount(a[3] ^ b[2]);\n+ int p33 = popcount(a[3] ^ b[3]);\nint mx23 = astc::min(p22 + p33, p23 + p32);\nint mx13 = astc::min(p21 + p33, p23 + 
p31);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_mathlib.h", "new_path": "Source/astcenc_mathlib.h", "diff": "@@ -351,31 +351,6 @@ static inline float int_as_float(int v)\nreturn u.b;\n}\n-/**\n- * @brief Population bit count.\n- *\n- * @param v The value to population count.\n- *\n- * @return The number of 1 bits.\n- */\n-static inline int popcount(uint64_t v)\n-{\n-#if ASTCENC_POPCNT >= 1\n- return static_cast<int>(_mm_popcnt_u64(v));\n-#else\n- uint64_t mask1 = 0x5555555555555555ULL;\n- uint64_t mask2 = 0x3333333333333333ULL;\n- uint64_t mask3 = 0x0F0F0F0F0F0F0F0FULL;\n- v -= (v >> 1) & mask1;\n- v = (v & mask2) + ((v >> 2) & mask2);\n- v += v >> 4;\n- v &= mask3;\n- v *= 0x0101010101010101ULL;\n- v >>= 56;\n- return static_cast<int>(v);\n-#endif\n-}\n-\n/**\n* @brief Fast approximation of 1.0 / sqrt(val).\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -336,6 +336,31 @@ ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)\n#endif\n+#if !defined(ASTCENC_USE_NATIVE_POPCOUNT)\n+\n+/**\n+ * @brief Population bit count.\n+ *\n+ * @param v The value to population count.\n+ *\n+ * @return The number of 1 bits.\n+ */\n+static int popcount(uint64_t v)\n+{\n+ uint64_t mask1 = 0x5555555555555555ULL;\n+ uint64_t mask2 = 0x3333333333333333ULL;\n+ uint64_t mask3 = 0x0F0F0F0F0F0F0F0FULL;\n+ v -= (v >> 1) & mask1;\n+ v = (v & mask2) + ((v >> 2) & mask2);\n+ v += v >> 4;\n+ v &= mask3;\n+ v *= 0x0101010101010101ULL;\n+ v >>= 56;\n+ return static_cast<int>(v);\n+}\n+\n+#endif\n+\n/**\n* @brief Debug function to print a vector of ints.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -918,4 +918,18 @@ ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 v)\nreturn vfloat4(vreinterpretq_f32_s32(v.m));\n}\n+#define ASTCENC_USE_NATIVE_POPCOUNT 1\n+\n+/**\n+ * @brief Population bit count.\n+ *\n+ * @param v The value to population count.\n+ *\n+ * @return The number of 1 bits.\n+ */\n+ASTCENC_SIMD_INLINE int popcount(uint64_t v)\n+{\n+ return static_cast<int>(vaddlv_u8(vcnt_u8(vcreate_u8(v))));\n+}\n+\n#endif // #ifndef ASTC_VECMATHLIB_NEON_4_H_INCLUDED\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1055,4 +1055,22 @@ ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)\n#endif // #if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)\n+#if ASTCENC_POPCNT >= 1\n+\n+#define ASTCENC_USE_NATIVE_POPCOUNT 1\n+\n+/**\n+ * @brief Population bit count.\n+ *\n+ * @param v The value to population count.\n+ *\n+ * @return The number of 1 bits.\n+ */\n+ASTCENC_SIMD_INLINE int popcount(uint64_t v)\n+{\n+ return static_cast<int>(_mm_popcnt_u64(v));\n+}\n+\n+#endif // ASTCENC_POPCNT >= 1\n+\n#endif // #ifndef ASTC_VECMATHLIB_SSE_4_H_INCLUDED\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_size_binary.py", "new_path": "Test/astc_size_binary.py", "diff": "@@ -103,7 +103,7 @@ def run_size_macos(binary):\nif line.startswith(\"Segment\"):\nparts = line.split()\n- assert(len(parts) == 3)\n+ assert len(parts) >= 3, parts\ncurrentSegment = parts[1]\nsize = int(parts[2])\n@@ -119,7 +119,7 @@ def run_size_macos(binary):\nif line.startswith(\"Section\"):\nparts = line.split()\n- assert(len(parts) == 3)\n+ assert len(parts) >= 3, parts\nsection = parts[1]\nsize = int(parts[2])\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add NEON popcount implementation
61,745
27.04.2022 21:16:06
-3,600
fc0641ce77bc63fc8ce8c23029f532dda018f866
Split select_msb and select for faster NEON
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib.h", "new_path": "Source/astcenc_vecmathlib.h", "diff": "@@ -237,7 +237,7 @@ ASTCENC_SIMD_INLINE vfloat atan2(vfloat y, vfloat x)\n{\nvfloat z = atan(abs(y / x));\nvmask xmask = vmask(float_as_int(x).m);\n- return change_sign(select(z, vfloat(astc::PI) - z, xmask), y);\n+ return change_sign(select_msb(z, vfloat(astc::PI) - z, xmask), y);\n}\n/*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -875,13 +875,21 @@ ASTCENC_SIMD_INLINE float hadd_s(vfloat8 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vfloat8 select(vfloat8 a, vfloat8 b, vmask8 cond)\n{\nreturn vfloat8(_mm256_blendv_ps(a.m, b.m, cond.m));\n}\n+/**\n+ * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ */\n+ASTCENC_SIMD_INLINE vfloat8 select_msb(vfloat8 a, vfloat8 b, vmask8 cond)\n+{\n+ return vfloat8(_mm256_blendv_ps(a.m, b.m, cond.m));\n+}\n+\n/**\n* @brief Accumulate lane-wise sums for a vector, folded 4-wide.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -345,7 +345,7 @@ ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)\n*\n* @return The number of 1 bits.\n*/\n-static int popcount(uint64_t v)\n+static inline int popcount(uint64_t v)\n{\nuint64_t mask1 = 0x5555555555555555ULL;\nuint64_t mask2 = 0x3333333333333333ULL;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -783,9 +783,17 @@ ASTCENC_SIMD_INLINE vfloat4 sqrt(vfloat4 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)\n+{\n+ return vfloat4(vbslq_f32(cond.m, b.m, a.m));\n+}\n+\n+/**\n+ * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ */\n+ASTCENC_SIMD_INLINE vfloat4 select_msb(vfloat4 a, vfloat4 b, vmask4 cond)\n{\nstatic const uint32x4_t msb = vdupq_n_u32(0x80000000u);\nuint32x4_t mask = vcgeq_u32(cond.m, msb);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -888,7 +888,7 @@ ASTCENC_SIMD_INLINE vfloat4 sqrt(vfloat4 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)\n{\n@@ -898,6 +898,17 @@ ASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)\n(cond.m[3] & 0x80000000) ? b.m[3] : a.m[3]);\n}\n+/**\n+ * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ */\n+ASTCENC_SIMD_INLINE vfloat4 select_msb(vfloat4 a, vfloat4 b, vmask4 cond)\n+{\n+ return vfloat4((cond.m[0] & 0x80000000) ? b.m[0] : a.m[0],\n+ (cond.m[1] & 0x80000000) ? b.m[1] : a.m[1],\n+ (cond.m[2] & 0x80000000) ? b.m[2] : a.m[2],\n+ (cond.m[3] & 0x80000000) ? 
b.m[3] : a.m[3]);\n+}\n+\n/**\n* @brief Load a vector of gathered results from an array;\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -863,10 +863,22 @@ ASTCENC_SIMD_INLINE vfloat4 sqrt(vfloat4 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)\n{\n+#if ASTCENC_SSE >= 41\n+ return vfloat4(_mm_blendv_ps(a.m, b.m, cond.m));\n+#else\n+ return vfloat4(_mm_or_ps(_mm_and_ps(cond.m, b.m), _mm_andnot_ps(cond.m, a.m)));\n+#endif\n+}\n+\n+/**\n+ * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ */\n+ASTCENC_SIMD_INLINE vfloat4 select_msb(vfloat4 a, vfloat4 b, vmask4 cond)\n+{\n#if ASTCENC_SSE >= 41\nreturn vfloat4(_mm_blendv_ps(a.m, b.m, cond.m));\n#else\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Split select_msb and select for faster NEON
61,745
27.04.2022 21:35:57
-3,600
dbb37d2f686c858a5a9a4c5705316c48e21f9eb4
Add select change to change log
[ { "change_type": "MODIFY", "old_path": "Docs/ChangeLog-3x.md", "new_path": "Docs/ChangeLog-3x.md", "diff": "@@ -17,6 +17,12 @@ The 3.7 release is in development ...\n* **Feature:** The command line tool PNG loader has been switched to use\nthe Wuffs library, which is robust and significantly faster than the\ncurrent stb_image implementation.\n+ * **Optimization:** Changed SIMD `select()` so that it matches the default\n+ NEON behavior (bitwise select), rather than the default x86-64 behavior\n+ (lane select on MSB). Specialization `select_msb()` added for the one case\n+ we want to select on a sign-bit, where NEON needs a different\n+ implementation. This provides a significant (20-25%) performance uplift on\n+ NEON implementations.\n### Performance:\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add select change to change log
61,745
28.04.2022 08:08:18
-3,600
a0bd33430a655bc6dc1baa0922a03ff56232c60e
Fix sse2 unit test
[ { "change_type": "MODIFY", "old_path": "Source/UnitTest/test_simd.cpp", "new_path": "Source/UnitTest/test_simd.cpp", "diff": "@@ -901,14 +901,14 @@ TEST(vfloat4, select_msb)\nvfloat4 b(4.0f, 2.0f, 2.0f, 4.0f);\n// Select in one direction\n- vfloat4 r1 = select(a, b, cond);\n+ vfloat4 r1 = select_msb(a, b, cond);\nEXPECT_EQ(r1.lane<0>(), 4.0f);\nEXPECT_EQ(r1.lane<1>(), 3.0f);\nEXPECT_EQ(r1.lane<2>(), 2.0f);\nEXPECT_EQ(r1.lane<3>(), 1.0f);\n// Select in the other\n- vfloat4 r2 = select(b, a, cond);\n+ vfloat4 r2 = select_msb(b, a, cond);\nEXPECT_EQ(r2.lane<0>(), 1.0f);\nEXPECT_EQ(r2.lane<1>(), 2.0f);\nEXPECT_EQ(r2.lane<2>(), 3.0f);\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_update_ref.sh", "new_path": "Test/astc_update_ref.sh", "diff": "@@ -17,6 +17,6 @@ echo \"\"\nTARGET_ROOT=${1}\n-python3 ./Test/astc_test_image.py --test-set all --block-size all --test-quality all --repeats 8 --encoder ref-$1-avx2\n-python3 ./Test/astc_test_image.py --test-set all --block-size all --test-quality all --repeats 8 --encoder ref-$1-sse4.1\n-python3 ./Test/astc_test_image.py --test-set all --block-size all --test-quality all --repeats 8 --encoder ref-$1-sse2\n+python3 ./Test/astc_test_image.py --test-set Small --block-size all --test-quality all --repeats 5 --encoder ref-$1-avx2\n+#python3 ./Test/astc_test_image.py --test-set all --block-size all --test-quality all --repeats 8 --encoder ref-$1-sse4.1\n+#python3 ./Test/astc_test_image.py --test-set all --block-size all --test-quality all --repeats 8 --encoder ref-$1-sse2\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix sse2 unit test
61,745
28.04.2022 13:42:15
-3,600
3980e3705030eafaacf7a0baf2f72b2f24b9c55c
Remove MSB select for vint types
[ { "change_type": "MODIFY", "old_path": "Source/UnitTest/test_simd.cpp", "new_path": "Source/UnitTest/test_simd.cpp", "diff": "@@ -1795,28 +1795,6 @@ TEST(vint4, select)\nEXPECT_EQ(r2.lane<3>(), 4);\n}\n-/** @brief Test vint4 select MSB. */\n-TEST(vint4, select_msb)\n-{\n- vint4 msb(0x80000000, 0, 0x80000000, 0);\n- vmask4 cond(msb.m);\n-\n- vint4 a(1, 3, 3, 1);\n- vint4 b(4, 2, 2, 4);\n-\n- vint4 r1 = select(a, b, cond);\n- EXPECT_EQ(r1.lane<0>(), 4);\n- EXPECT_EQ(r1.lane<1>(), 3);\n- EXPECT_EQ(r1.lane<2>(), 2);\n- EXPECT_EQ(r1.lane<3>(), 1);\n-\n- vint4 r2 = select(b, a, cond);\n- EXPECT_EQ(r2.lane<0>(), 1);\n- EXPECT_EQ(r2.lane<1>(), 2);\n- EXPECT_EQ(r2.lane<2>(), 3);\n- EXPECT_EQ(r2.lane<3>(), 4);\n-}\n-\n// VMASK4 tests - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n/** @brief Test vmask4 scalar literal constructor. */\nTEST(vmask4, scalar_literal_construct)\n@@ -3216,36 +3194,6 @@ TEST(vint8, select)\nEXPECT_EQ(r2.lane<7>(), 4);\n}\n-/** @brief Test vint8 select MSB. */\n-TEST(vint8, select_msb)\n-{\n- vint8 msb(0x80000000, 0, 0x80000000, 0, 0x80000000, 0, 0x80000000, 0);\n- vmask8 cond(msb.m);\n-\n- vint8 a(1, 3, 3, 1, 1, 3, 3, 1);\n- vint8 b(4, 2, 2, 4, 4, 2, 2, 4);\n-\n- vint8 r1 = select(a, b, cond);\n- EXPECT_EQ(r1.lane<0>(), 4);\n- EXPECT_EQ(r1.lane<1>(), 3);\n- EXPECT_EQ(r1.lane<2>(), 2);\n- EXPECT_EQ(r1.lane<3>(), 1);\n- EXPECT_EQ(r1.lane<4>(), 4);\n- EXPECT_EQ(r1.lane<5>(), 3);\n- EXPECT_EQ(r1.lane<6>(), 2);\n- EXPECT_EQ(r1.lane<7>(), 1);\n-\n- vint8 r2 = select(b, a, cond);\n- EXPECT_EQ(r2.lane<0>(), 1);\n- EXPECT_EQ(r2.lane<1>(), 2);\n- EXPECT_EQ(r2.lane<2>(), 3);\n- EXPECT_EQ(r2.lane<3>(), 4);\n- EXPECT_EQ(r2.lane<4>(), 1);\n- EXPECT_EQ(r2.lane<5>(), 2);\n- EXPECT_EQ(r2.lane<6>(), 3);\n- EXPECT_EQ(r2.lane<7>(), 4);\n-}\n-\n// vmask8 tests - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n/** @brief Test vmask8 scalar literal constructor. */\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -586,16 +586,12 @@ ASTCENC_SIMD_INLINE vint8 pack_low_bytes(vint8 v)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vint8 select(vint8 a, vint8 b, vmask8 cond)\n{\n- // Don't use _mm256_blendv_epi8 directly, as it doesn't give the select on\n- // float sign-bit in the mask behavior which is useful. 
Performance is the\n- // same, these casts are free.\n- __m256 av = _mm256_castsi256_ps(a.m);\n- __m256 bv = _mm256_castsi256_ps(b.m);\n- return vint8(_mm256_castps_si256(_mm256_blendv_ps(av, bv, cond.m)));\n+ __m256i condi = _mm256_castps_si256(cond.m);\n+ return vint8(_mm256_blendv_epi8(a.m, b.m, condi));\n}\n// ============================================================================\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -614,13 +614,11 @@ ASTCENC_SIMD_INLINE vint4 pack_low_bytes(vint4 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vint4 select(vint4 a, vint4 b, vmask4 cond)\n{\n- static const uint32x4_t msb = vdupq_n_u32(0x80000000u);\n- uint32x4_t mask = vcgeq_u32(cond.m, msb);\n- return vint4(vbslq_s32(mask, b.m, a.m));\n+ return vint4(vbslq_s32(cond.m, b.m, a.m));\n}\n// ============================================================================\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -664,20 +664,16 @@ ASTCENC_SIMD_INLINE vint4 pack_low_bytes(vint4 a)\n}\n/**\n- * @brief Return lanes from @c b if MSB of @c cond is set, else @c a.\n+ * @brief Return lanes from @c b if @c cond is set, else @c a.\n*/\nASTCENC_SIMD_INLINE vint4 select(vint4 a, vint4 b, vmask4 cond)\n{\n+ __m128i condi = _mm_castps_si128(cond.m);\n+\n#if ASTCENC_SSE >= 41\n- // Don't use _mm_blendv_epi8 directly, as it doesn't give the select on\n- // float sign-bit in the mask behavior which is useful. Performance is the\n- // same, these casts are free.\n- __m128 av = _mm_castsi128_ps(a.m);\n- __m128 bv = _mm_castsi128_ps(b.m);\n- return vint4(_mm_castps_si128(_mm_blendv_ps(av, bv, cond.m)));\n+ return vint4(_mm_blendv_epi8(a.m, b.m, condi));\n#else\n- __m128i d = _mm_srai_epi32(_mm_castps_si128(cond.m), 31);\n- return vint4(_mm_or_si128(_mm_and_si128(d, b.m), _mm_andnot_si128(d, a.m)));\n+ return vint4(_mm_or_si128(_mm_and_si128(condi, b.m), _mm_andnot_si128(condi, a.m)));\n#endif\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove MSB select for vint types
61,745
28.04.2022 22:33:00
-3,600
da94673a944aac9e8f84274229d3bd2ba6e27715
Clean up build options and documentation
[ { "change_type": "MODIFY", "old_path": "CMakeLists.txt", "new_path": "CMakeLists.txt", "diff": "@@ -42,7 +42,9 @@ option(ISA_NONE \"Enable builds for no SIMD\")\noption(ISA_NATIVE \"Enable builds for native SIMD\")\noption(DECOMPRESSOR \"Enable builds for decompression only\")\noption(DIAGNOSTICS \"Enable builds for diagnostic trace\")\n+option(ASAN \"Enable builds width address sanitizer\")\noption(UNITTEST \"Enable builds for unit tests\")\n+option(NO_INVARIANCE \"Enable builds without invariance\")\noption(CLI \"Enable build of CLI\" ON)\nset(UNIVERSAL_BUILD OFF)\n@@ -202,7 +204,9 @@ if(\"${MACOS_BUILD}\")\nprintopt(\"Universal bin \" ${UNIVERSAL_BUILD})\nendif()\nprintopt(\"Decompressor \" ${DECOMPRESSOR})\n+printopt(\"No invariance \" ${NO_INVARIANCE})\nprintopt(\"Diagnostics \" ${DIAGNOSTICS})\n+printopt(\"ASAN \" ${ASAN})\nprintopt(\"Unit tests \" ${UNITTEST})\n# Subcomponents\n" }, { "change_type": "MODIFY", "old_path": "Docs/Building.md", "new_path": "Docs/Building.md", "diff": "This page provides instructions for building `astcenc` from the sources in\nthis repository.\n-Builds use CMake 3.15 or higher as the build system generator. The examples on\n-this page only show how to use it to target NMake (Windows) and Make\n-(Linux and macOS), but CMake supports other build system backends.\n+Builds must use CMake 3.15 or higher as the build system generator. The\n+examples on this page show how to use it to generate build systems for NMake\n+(Windows) and Make (Linux and macOS), but CMake supports other build system\n+backends.\n## Windows\n@@ -13,9 +14,9 @@ Builds for Windows are tested with CMake 3.17 and Visual Studio 2019.\n### Configuring the build\n-To use CMake you must first configure the build. Create a build directory\n-in the root of the `astenc` checkout, and then run `cmake` inside that\n-directory to generate the build system.\n+To use CMake you must first configure the build. Create a build directory in\n+the root of the `astcenc` checkout, and then run `cmake` inside that directory\n+to generate the build system.\n```shell\n# Create a build directory\n@@ -25,17 +26,18 @@ cd build\n# Configure your build of choice, for example:\n# x86-64 using NMake\n-cmake -G \"NMake Makefiles\" -DCMAKE_BUILD_TYPE=Release ^\n- -DCMAKE_INSTALL_PREFIX=.\\ -DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON ..\n+cmake -G \"NMake Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=..\\ ^\n+ -DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON ..\n# x86-64 using Visual Studio solution\n-cmake -G \"Visual Studio 16 2019\" -T ClangCL ^\n- -DCMAKE_INSTALL_PREFIX=.\\ -DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON ..\n+cmake -G \"Visual Studio 16 2019\" -T ClangCL -DCMAKE_INSTALL_PREFIX=..\\ ^\n+ -DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON ..\n```\n-This example shows all SIMD variants being enabled. It is possible to build a\n-subset of the supported variants by enabling only the ones you require. At\n-least one variant must be enabled.\n+A single CMake configure can build multiple binaries for a single target CPU\n+architecture, for example building x64 for both SSE2 and AVX2. Each binary name\n+will include the build variant as a postfix. It is possible to build any set of\n+the supported SIMD variants by enabling only the ones you require.\nUsing the Visual Studio Clang-CL LLVM toolchain (`-T ClangCL`) is optional but\nproduces significantly faster binaries than the default toolchain. 
The C++ LLVM\n@@ -61,7 +63,7 @@ Builds for macOS and Linux are tested with CMake 3.17 and clang++ 9.0.\n### Configuring the build\nTo use CMake you must first configure the build. Create a build directory\n-in the root of the astenc checkout, and then run `cmake` inside that directory\n+in the root of the astcenc checkout, and then run `cmake` inside that directory\nto generate the build system.\n```shell\n@@ -75,32 +77,30 @@ cd build\n# Configure your build of choice, for example:\n# Arm arch64\n-cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=./ \\\n+cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=../ \\\n-DISA_NEON=ON ..\n# x86-64\n-cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=./ \\\n+cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=../ \\\n-DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON ..\n# macOS universal binary build\n-cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=./ \\\n+cmake -G \"Unix Makefiles\" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=../ \\\n-DISA_AVX2=ON -DISA_NEON=ON ..\n```\n-This example shows all SIMD variants being enabled. It is possible to build a\n-subset of the supported variants by enabling only the ones you require.\n-\n-For all platforms a single CMake configure can build multiple binaries for a\n-single target CPU architecture, for example building x64 for both SSE2 and\n-AVX2. The binary name will include the build variant as a postfix.\n-\n-The macOS platform additionally supports the ability to build a universal\n-binary, combining one x86 and one arm64 variant into a single output binary.\n-The OS select the correct variant to run for the machine being used to run the\n-binary. To build a universal binary select a single x64 variant and a single\n-arm64 variant, and both will be included in a single output binary. It is not\n-required, but if `CMAKE_OSX_ARCHITECTURES` is set on the command line (e.g.\n-by XCode-generated build commands) it will be validated against the other\n+A single CMake configure can build multiple binaries for a single target CPU\n+architecture, for example building x64 for both SSE2 and AVX2. Each binary name\n+will include the build variant as a postfix. It is possible to build any set of\n+the supported SIMD variants by enabling only the ones you require.\n+\n+For macOS, we additionally support the ability to build a universal binary,\n+combining one x86 and one arm64 variant into a single output binary. The OS\n+will select the correct variant to run for the machine being used to run the\n+built binary. To build a universal binary select a single x86 variant and a\n+single arm64 variant, and both will be included in a single output binary. It\n+is not required, but if `CMAKE_OSX_ARCHITECTURES` is set on the command line\n+(e.g. 
by XCode-generated build commands) it will be validated against the other\nconfiguration variant settings.\n### Building\n@@ -116,7 +116,8 @@ make install -j16\n## Advanced build options\n-For codec developers there are a number of useful features in the build system.\n+For codec developers and power users there are a number of useful features in\n+the build system.\n### Build Types\n@@ -131,17 +132,6 @@ We support and test the following `CMAKE_BUILD_TYPE` options.\nNote that optimized release builds are compiled with link-time optimization,\nwhich can make profiling more challenging ...\n-### No intrinsics builds\n-\n-All normal builds will use SIMD accelerated code paths using intrinsics, as all\n-target architectures (x86-64 and aarch64) guarantee SIMD availability. For\n-development purposes it is possible to build an intrinsic-free build which uses\n-no explicit SIMD acceleration (the compiler may still auto-vectorize).\n-\n-To enable this binary variant add `-DISA_NONE=ON` to the CMake command line\n-when configuring. It is NOT recommended to use this for production; it is\n-significantly slower than the vectorized SIMD builds.\n-\n### Constrained block size builds\nAll normal builds will support all ASTC block sizes, including the worst case\n@@ -151,12 +141,30 @@ by adding `-DBLOCK_MAX_TEXELS=<texel_count>` to to CMake command line when\nconfiguring. Legal block sizes that are unavailable in a restricted build will\nreturn the error `ASTCENC_ERR_NOT_IMPLEMENTED` during context creation.\n-### Testing\n+### Non-invariant builds\n+\n+All normal builds are designed to be invariant, so any build from the same git\n+revision will produce bit-identical results for all compilers and CPU\n+architectures. To achieve this we sacrifice some performance, so if this is\n+not required you can specify `-DNO_INVARIANCE=ON` to enable additional\n+optimizations.\n+\n+### No intrinsics builds\n+\n+All normal builds will use SIMD accelerated code paths using intrinsics, as all\n+supported target architectures (x86 and arm64) guarantee SIMD availability. For\n+development purposes it is possible to build an intrinsic-free build which uses\n+no explicit SIMD acceleration (the compiler may still auto-vectorize).\n+\n+To enable this binary variant add `-DISA_NONE=ON` to the CMake command line\n+when configuring. It is NOT recommended to use this for production; it is\n+significantly slower than the vectorized SIMD builds.\n-We support building unit tests.\n+### Test builds\n-These builds use the `googletest` framework, which is pulled in though a git\n-submodule. On first use, you must fetch the submodule dependency:\n+We support building unit tests. These use the `googletest` framework, which is\n+pulled in though a git submodule. On first use, you must fetch the submodule\n+dependency:\n```shell\ngit submodule init\n@@ -174,7 +182,13 @@ cd build\nctest --verbose\n```\n-### Packaging\n+### Address sanitizer builds\n+\n+We support building with ASAN on Linux and macOS when using a compiler that\n+supports it. 
To build binaries with ASAN checking enabled add `-DASAN=ON` to\n+the CMake command line when configuring.\n+\n+## Packaging a release bundle\nWe support building a release bundle of all enabled binary configurations in\nthe current CMake configuration using the `package` build target\n" }, { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -142,7 +142,7 @@ macro(astcenc_set_properties NAME)\n# Use pthreads on Linux/macOS\n$<$<PLATFORM_ID:Linux,Darwin>:-pthread>)\n- if(${ENABLE_ASAN})\n+ if(${ASAN})\ntarget_compile_options(${NAME}\nPRIVATE\n$<$<CXX_COMPILER_ID:${CLANG_LIKE}>:-fsanitize=address>)\n@@ -152,7 +152,7 @@ macro(astcenc_set_properties NAME)\n$<$<CXX_COMPILER_ID:${CLANG_LIKE}>:-fsanitize=address>)\nendif()\n- if(${ENABLE_NO_INVARIANCE})\n+ if(${NO_INVARIANCE})\ntarget_compile_definitions(${NAME}\nPRIVATE\nASTCENC_NO_INVARIANCE=1)\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_test_image.py", "new_path": "Test/astc_test_image.py", "diff": "@@ -301,9 +301,9 @@ def parse_command_line():\n\"ref-main-neon\", \"ref-main-sse2\", \"ref-main-sse4.1\", \"ref-main-avx2\"]\n# All test encoders\n- testcoders = [\"none\", \"neon\", \"sse2\", \"sse4.1\", \"avx2\"]\n- testcodersAArch64 = [\"none\", \"neon\"]\n- testcodersX86 = [\"none\", \"sse2\", \"sse4.1\", \"avx2\"]\n+ testcoders = [\"none\", \"neon\", \"sse2\", \"sse4.1\", \"avx2\", \"native\"]\n+ testcodersAArch64 = [\"none\", \"neon\", \"native\"]\n+ testcodersX86 = [\"none\", \"sse2\", \"sse4.1\", \"avx2\", \"native\"]\ncoders = refcoders + testcoders + [\"all-aarch64\", \"all-x86\"]\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Clean up build options and documentation
61,745
28.04.2022 22:46:25
-3,600
f48cc27b2528286126c116f42f2792ed2fa13755
Update change log and frontpage
[ { "change_type": "MODIFY", "old_path": "Docs/ChangeLog-3x.md", "new_path": "Docs/ChangeLog-3x.md", "diff": "@@ -9,19 +9,24 @@ clocked at 4.2 GHz, running `astcenc` using AVX2 and 6 threads.\n<!-- ---------------------------------------------------------------------- -->\n## 3.7\n-**Status:** In development\n+**Status:** April 2022\n-The 3.7 release is in development ...\n+The 3.7 release contains another round of performance optimizations, including\n+significant improvements to the command line front-end (faster PNG loader) and\n+the arm64 build of the codec (faster NEON implementation).\n* **General:**\n* **Feature:** The command line tool PNG loader has been switched to use\nthe Wuffs library, which is robust and significantly faster than the\ncurrent stb_image implementation.\n+ * **Feature:** Support for non-invariant builds returns. Opt-in to slightly\n+ faster, but not bit-exact, builds by setting `-DNO_INVARIANCE=ON` for the\n+ CMake configuration. This improves performance by around 2%.\n* **Optimization:** Changed SIMD `select()` so that it matches the default\nNEON behavior (bitwise select), rather than the default x86-64 behavior\n(lane select on MSB). Specialization `select_msb()` added for the one case\nwe want to select on a sign-bit, where NEON needs a different\n- implementation. This provides a significant (20-25%) performance uplift on\n+ implementation. This provides a significant (>25%) performance uplift on\nNEON implementations.\n### Performance:\n@@ -33,7 +38,7 @@ Key for charts:\n**Relative performance vs 3.5 release:**\n-![Relative scores 3.6 vs 3.5](./ChangeLogImg/relative-3.5-to-3.6.png)\n+![Relative scores 3.7 vs 3.6](./ChangeLogImg/relative-3.6-to-3.7.png)\n<!-- ---------------------------------------------------------------------- -->\n## 3.6\n" }, { "change_type": "ADD", "old_path": "Docs/ChangeLogImg/relative-3.6-to-3.7.png", "new_path": "Docs/ChangeLogImg/relative-3.6-to-3.7.png", "diff": "Binary files /dev/null and b/Docs/ChangeLogImg/relative-3.6-to-3.7.png differ\n" }, { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -58,7 +58,7 @@ from 0.89 bits/pixel up to 8 bits/pixel.\nRelease build binaries for the `astcenc` stable releases are provided in the\n[GitHub Releases page][3].\n-**Latest 3.x stable release:** 3.6\n+**Latest 3.x stable release:** 3.7\n* Change log: [3.x series](./Docs/ChangeLog-3x.md)\n**Latest 2.x stable release:** 2.5\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_test_result_plot.py", "new_path": "Test/astc_test_result_plot.py", "diff": "@@ -260,7 +260,7 @@ def main():\n[\n# Relative scores\n[\"thorough\", \"medium\", \"fast\"],\n- [\"ref-2.5-avx2\", \"ref-3.5-avx2\"],\n+ [\"ref-2.5-avx2\", \"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\n\"ref-1.7\",\n@@ -270,7 +270,7 @@ def main():\n], [\n# Absolute scores\n[\"thorough\", \"medium\", \"fast\"],\n- [\"ref-1.7\", \"ref-2.5-avx2\", \"ref-3.5-avx2\"],\n+ [\"ref-1.7\", \"ref-2.5-avx2\", \"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nFalse,\nNone,\n@@ -296,7 +296,7 @@ def main():\n[\n# Relative scores\n[\"thorough\", \"medium\", \"fast\"],\n- [\"ref-3.6-avx2\"],\n+ [\"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\n\"ref-1.7\",\n@@ -309,7 +309,7 @@ def main():\n[\n# Relative scores\n[\"thorough\", \"medium\", \"fast\", \"fastest\"],\n- [\"ref-3.6-avx2\"],\n+ [\"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\n\"ref-2.5-avx2\",\n@@ -322,27 +322,27 @@ def main():\n[\n# Relative scores\n[\"thorough\", \"medium\", 
\"fast\", \"fastest\"],\n- [\"ref-3.6-avx2\"],\n+ [\"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\n- \"ref-3.5-avx2\",\n+ \"ref-3.6-avx2\",\nNone,\n\"relative-3.x-vs-3.x.png\",\n(relXLimits, None),\n], [\n# Relative ISAs of latest\n[\"thorough\", \"medium\", \"fast\", \"fastest\"],\n- [\"ref-3.6-sse4.1\", \"ref-3.6-avx2\"],\n+ [\"ref-3.7-sse4.1\", \"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\n- \"ref-3.6-sse2\",\n+ \"ref-3.7-sse2\",\nNone,\n\"relative-3.x-isa.png\",\n(None, None)\n], [\n# Relative quality of latest\n[\"medium\", \"fast\", \"fastest\"],\n- [\"ref-3.6-avx2\"],\n+ [\"ref-3.7-avx2\"],\n[\"4x4\", \"6x6\", \"8x8\"],\nTrue,\nNone,\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Update change log and frontpage
61,745
29.04.2022 07:24:09
-3,600
d3637a1b7d98ba0882c7349bb73dc6a40d70f304
Make testing native build off by default
[ { "change_type": "MODIFY", "old_path": "Test/astc_test_image.py", "new_path": "Test/astc_test_image.py", "diff": "@@ -302,8 +302,8 @@ def parse_command_line():\n# All test encoders\ntestcoders = [\"none\", \"neon\", \"sse2\", \"sse4.1\", \"avx2\", \"native\"]\n- testcodersAArch64 = [\"none\", \"neon\", \"native\"]\n- testcodersX86 = [\"none\", \"sse2\", \"sse4.1\", \"avx2\", \"native\"]\n+ testcodersAArch64 = [\"none\", \"neon\"]\n+ testcodersX86 = [\"none\", \"sse2\", \"sse4.1\", \"avx2\"]\ncoders = refcoders + testcoders + [\"all-aarch64\", \"all-x86\"]\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Make testing native build off by default
61,769
29.04.2022 14:25:39
-28,800
1494f63ca2f59975ba2e47c824e640c18eb8246e
Make header flags read in-order for easier development
[ { "change_type": "MODIFY", "old_path": "Source/astcenc.h", "new_path": "Source/astcenc.h", "diff": "@@ -311,29 +311,6 @@ static const unsigned int ASTCENC_FLG_MAP_NORMAL = 1 << 0;\n*/\nstatic const unsigned int ASTCENC_FLG_MAP_MASK = 1 << 1;\n-/**\n- * @brief Enable RGBM map compression.\n- *\n- * Input data will be treated as HDR data that has been stored in an LDR RGBM-encoded wrapper\n- * format. Data must be preprocessed by the user to be in LDR RGBM format before calling the\n- * compression function, this flag is only used to control the use of RGBM-specific heuristics and\n- * error metrics.\n- *\n- * IMPORTANT: The ASTC format is prone to bad failure modes with unconstrained RGBM data; very small\n- * M values can round to zero due to quantization and result in black or white pixels. It is highly\n- * recommended that the minimum value of M used in the encoding is kept above a lower threshold (try\n- * 16 or 32). Applying this threshold reduces the number of very dark colors that can be\n- * represented, but is still higher precision than 8-bit LDR.\n- *\n- * When this flag is set the value of @c rgbm_m_scale in the context must be set to the RGBM scale\n- * factor used during reconstruction. This defaults to 5 when in RGBM mode.\n- *\n- * It is recommended that the value of @c cw_a_weight is set to twice the value of the multiplier\n- * scale, ensuring that the M value is accurately encoded. This defaults to 10 when in RGBM mode,\n- * matching the default scale factor.\n- */\n-static const unsigned int ASTCENC_FLG_MAP_RGBM = 1 << 6;\n-\n/**\n* @brief Enable alpha weighting.\n*\n@@ -369,6 +346,29 @@ static const unsigned int ASTCENC_FLG_DECOMPRESS_ONLY = 1 << 4;\n*/\nstatic const unsigned int ASTCENC_FLG_SELF_DECOMPRESS_ONLY = 1 << 5;\n+/**\n+ * @brief Enable RGBM map compression.\n+ *\n+ * Input data will be treated as HDR data that has been stored in an LDR RGBM-encoded wrapper\n+ * format. Data must be preprocessed by the user to be in LDR RGBM format before calling the\n+ * compression function, this flag is only used to control the use of RGBM-specific heuristics and\n+ * error metrics.\n+ *\n+ * IMPORTANT: The ASTC format is prone to bad failure modes with unconstrained RGBM data; very small\n+ * M values can round to zero due to quantization and result in black or white pixels. It is highly\n+ * recommended that the minimum value of M used in the encoding is kept above a lower threshold (try\n+ * 16 or 32). Applying this threshold reduces the number of very dark colors that can be\n+ * represented, but is still higher precision than 8-bit LDR.\n+ *\n+ * When this flag is set the value of @c rgbm_m_scale in the context must be set to the RGBM scale\n+ * factor used during reconstruction. This defaults to 5 when in RGBM mode.\n+ *\n+ * It is recommended that the value of @c cw_a_weight is set to twice the value of the multiplier\n+ * scale, ensuring that the M value is accurately encoded. This defaults to 10 when in RGBM mode,\n+ * matching the default scale factor.\n+ */\n+static const unsigned int ASTCENC_FLG_MAP_RGBM = 1 << 6;\n+\n/**\n* @brief The bit mask of all valid flags.\n*/\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Make header flags read in-order for easier development
61,745
03.05.2022 19:41:08
-3,600
82575f1bc3c795632624cb15cc8e110eb856b5f6
Reduce casts on weight offset calculation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -165,7 +165,7 @@ static void compute_lowest_and_highest_weight(\nunsigned int max_angular_steps,\nunsigned int max_quant_steps,\nconst float* offsets,\n- int* lowest_weight,\n+ float* lowest_weight,\nint* weight_span,\nfloat* error,\nfloat* cut_low_weight_error,\n@@ -218,7 +218,7 @@ static void compute_lowest_and_highest_weight(\nvint span = float_to_int(maxidx - minidx + vfloat(1));\nspan = min(span, vint(max_quant_steps + 3));\nspan = max(span, vint(2));\n- storea(float_to_int(minidx), &lowest_weight[sp]);\n+ storea(minidx, &lowest_weight[sp]);\nstorea(span, &weight_span[sp]);\n// The cut_(lowest/highest)_weight_error indicate the error that results from forcing\n@@ -256,7 +256,7 @@ static void compute_angular_endpoints_for_quant_levels(\ncompute_angular_offsets(weight_count, dec_weight_ideal_value,\nmax_angular_steps, angular_offsets);\n- alignas(ASTCENC_VECALIGN) int32_t lowest_weight[ANGULAR_STEPS];\n+ alignas(ASTCENC_VECALIGN) float lowest_weight[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) int32_t weight_span[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) float error[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) float cut_low_weight_error[ANGULAR_STEPS];\n@@ -333,13 +333,12 @@ static void compute_angular_endpoints_for_quant_levels(\nbsi = astc::max(0, bsi);\n- float stepsize = 1.0f / (1.0f + static_cast<float>(bsi));\n- int lwi = lowest_weight[bsi] + static_cast<int>(best_results[q].lane<2>());\n- int hwi = lwi + q - 1;\n+ float lwi = lowest_weight[bsi] + best_results[q].lane<2>();\n+ float hwi = lwi + static_cast<float>(q) - 1.0f;\n- float offset = angular_offsets[bsi] * stepsize;\n- low_value[i] = offset + static_cast<float>(lwi) * stepsize;\n- high_value[i] = offset + static_cast<float>(hwi) * stepsize;\n+ float stepsize = 1.0f / (1.0f + static_cast<float>(bsi));\n+ low_value[i] = (angular_offsets[bsi] + lwi) * stepsize;\n+ high_value[i] = (angular_offsets[bsi] + hwi) * stepsize;\n}\n}\n@@ -365,7 +364,7 @@ static void compute_lowest_and_highest_weight_lwc(\nunsigned int max_angular_steps,\nunsigned int max_quant_steps,\nconst float* offsets,\n- int* lowest_weight,\n+ float* lowest_weight,\nint* weight_span,\nfloat* error\n) {\n@@ -402,7 +401,7 @@ static void compute_lowest_and_highest_weight_lwc(\nvint span = float_to_int(maxidx - minidx + vfloat(1.0f));\nspan = min(span, vint(max_quant_steps + 3));\nspan = max(span, vint(2));\n- storea(float_to_int(minidx), &lowest_weight[sp]);\n+ storea(minidx, &lowest_weight[sp]);\nstorea(span, &weight_span[sp]);\n// The cut_(lowest/highest)_weight_error indicate the error that results from forcing\n@@ -435,7 +434,7 @@ static void compute_angular_endpoints_for_quant_levels_lwc(\nunsigned int max_angular_steps = max_angular_steps_needed_for_quant_level[max_quant_level];\nalignas(ASTCENC_VECALIGN) float angular_offsets[ANGULAR_STEPS];\n- alignas(ASTCENC_VECALIGN) int32_t lowest_weight[ANGULAR_STEPS];\n+ alignas(ASTCENC_VECALIGN) float lowest_weight[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) int32_t weight_span[ANGULAR_STEPS];\nalignas(ASTCENC_VECALIGN) float error[ANGULAR_STEPS];\n@@ -487,11 +486,12 @@ static void compute_angular_endpoints_for_quant_levels_lwc(\nbsi = astc::max(0, bsi);\n- int lwi = lowest_weight[bsi];\n- int hwi = lwi + q - 1;\n+ float lwi = lowest_weight[bsi];\n+ float hwi = lwi + static_cast<float>(q) - 1.0f;\n- low_value[i] = (angular_offsets[bsi] + static_cast<float>(lwi)) / (1.0f + 
static_cast<float>(bsi));\n- high_value[i] = (angular_offsets[bsi] + static_cast<float>(hwi)) / (1.0f + static_cast<float>(bsi));\n+ float stepsize = 1.0f / (1.0f + static_cast<float>(bsi));\n+ low_value[i] = (angular_offsets[bsi] + lwi) * stepsize;\n+ high_value[i] = (angular_offsets[bsi] + hwi) * stepsize;\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Reduce casts on weight offset calculation
61,745
05.05.2022 23:48:48
-3,600
84bf695be9cf19668cc9d179d260d0a747da4760
Use simple min/max rather than select
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -379,13 +379,9 @@ static void compute_lowest_and_highest_weight_lwc(\nvfloat diff = sval - svalrte;\nerrval += diff * diff;\n- // Reset tracker on min hit\n- vmask mask = svalrte < minidx;\n- minidx = select(minidx, svalrte, mask);\n-\n- // Reset tracker on max hit\n- mask = svalrte > maxidx;\n- maxidx = select(maxidx, svalrte, mask);\n+ // Compute min and max quantized weight spans for each step\n+ minidx = min(minidx, svalrte);\n+ maxidx = max(maxidx, svalrte);\n}\n// Write out min weight and weight span; clamp span to a usable range\n@@ -395,8 +391,8 @@ static void compute_lowest_and_highest_weight_lwc(\nstorea(minidx, &lowest_weight[sp]);\nstorea(span, &weight_span[sp]);\n- // The cut_(lowest/highest)_weight_error indicate the error that results from forcing\n- // samples that should have had the weight value one step (up/down).\n+ // The cut_(lowest/highest)_weight_error indicate the error that results from\n+ // forcing samples that should have had the weight value one step (up/down).\nvfloat ssize = 1.0f / rcp_stepsize;\nvfloat errscale = ssize * ssize;\nstorea(errval * errscale, &error[sp]);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use simple min/max rather than select
61,745
06.05.2022 22:19:29
-3,600
6741abbaad9d6548dceef49747fe49e7f6523269
Remove redundant ep_and_wt copies
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -388,7 +388,6 @@ static float compress_symbolic_block_for_partition_1plane(\n// Compute ideal weights and endpoint colors, with no quantization or decimation\nendpoints_and_weights& ei = tmpbuf.ei1;\n- endpoints_and_weights *eix = tmpbuf.eix1;\ncompute_ideal_colors_and_weights_1plane(blk, pi, ei);\n// Compute ideal weights and endpoint colors for every decimation\n@@ -412,7 +411,6 @@ static float compress_symbolic_block_for_partition_1plane(\ncompute_ideal_weights_for_decimation(\nei,\n- eix[i],\ndi,\ndec_weights_ideal_value + i * BLOCK_MAX_WEIGHTS);\n}\n@@ -487,7 +485,7 @@ static float compress_symbolic_block_for_partition_1plane(\n// Compute weight quantization errors for the block mode\nqwt_errors[i] = compute_error_of_weight_set_1plane(\n- eix[decimation_mode],\n+ ei,\ndi,\ndec_weights_quant_uvalue + BLOCK_MAX_WEIGHTS * i);\n}\n@@ -532,6 +530,7 @@ static float compress_symbolic_block_for_partition_1plane(\nvfloat4 rgbo_colors[BLOCK_MAX_PARTITIONS];\nsymbolic_compressed_block workscb;\n+ endpoints workep = ei.ep;\nuint8_t* u8_weight_src = dec_weights_quant_pvalue + BLOCK_MAX_WEIGHTS * bm_packed_index;\n@@ -545,14 +544,14 @@ static float compress_symbolic_block_for_partition_1plane(\nrecompute_ideal_colors_1plane(\nblk, pi, di,\nweight_quant_mode, workscb.weights,\n- eix[decimation_mode].ep, rgbs_colors, rgbo_colors);\n+ workep, rgbs_colors, rgbo_colors);\n// Quantize the chosen color\nfor (unsigned int j = 0; j < partition_count; j++)\n{\nworkscb.color_formats[j] = pack_color_endpoints(\n- eix[decimation_mode].ep.endpt0[j],\n- eix[decimation_mode].ep.endpt1[j],\n+ workep.endpt0[j],\n+ workep.endpt1[j],\nrgbs_colors[j],\nrgbo_colors[j],\npartition_format_specifiers[i][j],\n@@ -575,8 +574,8 @@ static float compress_symbolic_block_for_partition_1plane(\nfor (unsigned int j = 0; j < partition_count; j++)\n{\ncolor_formats_mod[j] = pack_color_endpoints(\n- eix[decimation_mode].ep.endpt0[j],\n- eix[decimation_mode].ep.endpt1[j],\n+ workep.endpt0[j],\n+ workep.endpt1[j],\nrgbs_colors[j],\nrgbo_colors[j],\npartition_format_specifiers[i][j],\n@@ -732,8 +731,7 @@ static float compress_symbolic_block_for_partition_2planes(\n// Compute ideal weights and endpoint colors, with no quantization or decimation\nendpoints_and_weights& ei1 = tmpbuf.ei1;\nendpoints_and_weights& ei2 = tmpbuf.ei2;\n- endpoints_and_weights* eix1 = tmpbuf.eix1;\n- endpoints_and_weights* eix2 = tmpbuf.eix2;\n+\ncompute_ideal_colors_and_weights_2planes(bsd, blk, plane2_component, ei1, ei2);\n// Compute ideal weights and endpoint colors for every decimation\n@@ -754,13 +752,11 @@ static float compress_symbolic_block_for_partition_2planes(\ncompute_ideal_weights_for_decimation(\nei1,\n- eix1[i],\ndi,\ndec_weights_ideal_value + i * BLOCK_MAX_WEIGHTS);\ncompute_ideal_weights_for_decimation(\nei2,\n- eix2[i],\ndi,\ndec_weights_ideal_value + i * BLOCK_MAX_WEIGHTS + WEIGHTS_PLANE2_OFFSET);\n}\n@@ -851,8 +847,8 @@ static float compress_symbolic_block_for_partition_2planes(\n// Compute weight quantization errors for the block mode\nqwt_errors[i] = compute_error_of_weight_set_2planes(\n- eix1[decimation_mode],\n- eix2[decimation_mode],\n+ ei1,\n+ ei2,\ndi,\ndec_weights_quant_uvalue + BLOCK_MAX_WEIGHTS * i,\ndec_weights_quant_uvalue + BLOCK_MAX_WEIGHTS * i + WEIGHTS_PLANE2_OFFSET);\n@@ -899,13 +895,11 @@ static float compress_symbolic_block_for_partition_2planes(\ntrace_add_data(\"weight_z\", 
di.weight_z);\ntrace_add_data(\"weight_quant\", weight_quant_mode);\n- // Recompute the ideal color endpoints before storing them.\n- merge_endpoints(eix1[decimation_mode].ep, eix2[decimation_mode].ep, plane2_component, epm);\n-\nvfloat4 rgbs_color;\nvfloat4 rgbo_color;\nsymbolic_compressed_block workscb;\n+ endpoints workep = epm;\nuint8_t* u8_weight1_src = dec_weights_quant_pvalue + BLOCK_MAX_WEIGHTS * bm_packed_index;\nuint8_t* u8_weight2_src = dec_weights_quant_pvalue + BLOCK_MAX_WEIGHTS * bm_packed_index + WEIGHTS_PLANE2_OFFSET;\n@@ -921,12 +915,12 @@ static float compress_symbolic_block_for_partition_2planes(\nrecompute_ideal_colors_2planes(\nblk, bsd, di, weight_quant_mode,\nworkscb.weights, workscb.weights + WEIGHTS_PLANE2_OFFSET,\n- epm, rgbs_color, rgbo_color, plane2_component);\n+ workep, rgbs_color, rgbo_color, plane2_component);\n// Quantize the chosen color\nworkscb.color_formats[0] = pack_color_endpoints(\n- epm.endpt0[0],\n- epm.endpt1[0],\n+ workep.endpt0[0],\n+ workep.endpt1[0],\nrgbs_color, rgbo_color,\npartition_format_specifiers[i][0],\nworkscb.color_values[0],\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -772,8 +772,7 @@ float compute_error_of_weight_set_2planes(\n/* See header for documentation. */\nvoid compute_ideal_weights_for_decimation(\n- const endpoints_and_weights& eai_in,\n- endpoints_and_weights& eai_out,\n+ const endpoints_and_weights& ei,\nconst decimation_info& di,\nfloat* dec_weight_ideal_value\n) {\n@@ -783,40 +782,23 @@ void compute_ideal_weights_for_decimation(\npromise(texel_count > 0);\npromise(weight_count > 0);\n- // This function includes a copy of the epw from eai_in to eai_out. We do it here because we\n- // want to load the data anyway, so we can avoid loading it from memory twice.\n- eai_out.ep = eai_in.ep;\n- eai_out.is_constant_weight_error_scale = eai_in.is_constant_weight_error_scale;\n-\n// Ensure that the end of the output arrays that are used for SIMD paths later are filled so we\n// can safely run SIMD elsewhere without a loop tail. Note that this is always safe as weight\n// arrays always contain space for 64 elements\nunsigned int prev_weight_count_simd = round_down_to_simd_multiple_vla(weight_count - 1);\nstorea(vfloat::zero(), dec_weight_ideal_value + prev_weight_count_simd);\n- // If we have a 1:1 mapping just shortcut the computation - clone the weights into both the\n- // weight set and the output epw copy.\n-\n- // Transfer enough to also copy zero initialized SIMD over-fetch region\n+ // If we have a 1:1 mapping just shortcut the computation. 
Transfer enough to also copy the\n+ // zero-initialized SIMD over-fetch region\n+ if (is_direct)\n+ {\nunsigned int texel_count_simd = round_up_to_simd_multiple_vla(texel_count);\nfor (unsigned int i = 0; i < texel_count_simd; i += ASTCENC_SIMD_WIDTH)\n{\n- vfloat weight(eai_in.weights + i);\n- vfloat weight_error_scale(eai_in.weight_error_scale + i);\n-\n- storea(weight, eai_out.weights + i);\n- storea(weight_error_scale, eai_out.weight_error_scale + i);\n-\n- // Direct 1:1 weight mapping, so clone weights directly\n- // TODO: Can we just avoid the copy for direct cases?\n- if (is_direct)\n- {\n+ vfloat weight(ei.weights + i);\nstorea(weight, dec_weight_ideal_value + i);\n}\n- }\n- if (is_direct)\n- {\nreturn;\n}\n@@ -824,8 +806,8 @@ void compute_ideal_weights_for_decimation(\nalignas(ASTCENC_VECALIGN) float infilled_weights[BLOCK_MAX_TEXELS];\n// Compute an initial average for each decimated weight\n- bool constant_wes = eai_in.is_constant_weight_error_scale;\n- vfloat weight_error_scale(eai_in.weight_error_scale[0]);\n+ bool constant_wes = ei.is_constant_weight_error_scale;\n+ vfloat weight_error_scale(ei.weight_error_scale[0]);\n// This overshoots - this is OK as we initialize the array tails in the\n// decimation table structures to safe values ...\n@@ -847,13 +829,13 @@ void compute_ideal_weights_for_decimation(\nif (!constant_wes)\n{\n- weight_error_scale = gatherf(eai_in.weight_error_scale, texel);\n+ weight_error_scale = gatherf(ei.weight_error_scale, texel);\n}\nvfloat contrib_weight = weight * weight_error_scale;\nweight_weight += contrib_weight;\n- initial_weight += gatherf(eai_in.weights, texel) * contrib_weight;\n+ initial_weight += gatherf(ei.weights, texel) * contrib_weight;\n}\nstorea(initial_weight / weight_weight, dec_weight_ideal_value + i);\n@@ -905,12 +887,12 @@ void compute_ideal_weights_for_decimation(\nif (!constant_wes)\n{\n- weight_error_scale = gatherf(eai_in.weight_error_scale, texel);\n+ weight_error_scale = gatherf(ei.weight_error_scale, texel);\n}\nvfloat scale = weight_error_scale * contrib_weight;\nvfloat old_weight = gatherf(infilled_weights, texel);\n- vfloat ideal_weight = gatherf(eai_in.weights, texel);\n+ vfloat ideal_weight = gatherf(ei.weights, texel);\nerror_change0 += contrib_weight * scale;\nerror_change1 += (old_weight - ideal_weight) * scale;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1093,12 +1093,6 @@ struct alignas(ASTCENC_VECALIGN) compression_working_buffers\n/** @brief Ideal endpoints and weights for plane 2. */\nendpoints_and_weights ei2;\n- /** @brief Ideal decimated endpoints and weights for plane 1. */\n- endpoints_and_weights eix1[WEIGHTS_MAX_DECIMATION_MODES];\n-\n- /** @brief Ideal decimated endpoints and weights for plane 2. 
*/\n- endpoints_and_weights eix2[WEIGHTS_MAX_DECIMATION_MODES];\n-\n/**\n* @brief Decimated ideal weight values.\n*\n@@ -1887,14 +1881,12 @@ void compute_ideal_colors_and_weights_2planes(\n* Then, set step size to <some initial value> and attempt one step towards the original ideal\n* weight if it helps to reduce error.\n*\n- * @param eai_in The non-decimated endpoints and weights.\n- * @param eai_out A copy of eai_in we can modify later for refinement.\n+ * @param ei The non-decimated endpoints and weights.\n* @param di The selected weight decimation.\n* @param[out] dec_weight_ideal_value The ideal values for the decimated weight set.\n*/\nvoid compute_ideal_weights_for_decimation(\n- const endpoints_and_weights& eai_in,\n- endpoints_and_weights& eai_out,\n+ const endpoints_and_weights& ei,\nconst decimation_info& di,\nfloat* dec_weight_ideal_value);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove redundant ep_and_wt copies
61,745
07.05.2022 20:16:11
-3,600
1d218322d5e02aa01e85d588d8105704f488fd33
Vectorize unquant weight creation
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -234,8 +234,9 @@ static bool realign_weights_decimated(\nendpnt1[pa_idx]);\n}\n- uint8_t uq_pl_weights[BLOCK_MAX_WEIGHTS];\n- float uq_pl_weightsf[BLOCK_MAX_WEIGHTS];\n+ alignas(ASTCENC_VECALIGN) int uq_pl_weights[BLOCK_MAX_WEIGHTS];\n+ alignas(ASTCENC_VECALIGN) float uq_pl_weightsf[BLOCK_MAX_WEIGHTS];\n+\nuint8_t* dec_weights_quant_pvalue = scb.weights;\nbool adjustments = false;\n@@ -253,10 +254,15 @@ static bool realign_weights_decimated(\n}\n// Create an unquantized weight grid for this decimation level\n- for (unsigned int we_idx = 0; we_idx < weight_count; we_idx++)\n+ for (unsigned int we_idx = 0; we_idx < weight_count; we_idx += ASTCENC_SIMD_WIDTH)\n{\n- uq_pl_weights[we_idx] = qat->unquantized_value[dec_weights_quant_pvalue[we_idx]];\n- uq_pl_weightsf[we_idx] = static_cast<float>(uq_pl_weights[we_idx]);\n+ vint quant_value(dec_weights_quant_pvalue + we_idx);\n+\n+ vint unquant_value = gatheri(qat->unquantized_value, quant_value);\n+ storea(unquant_value, uq_pl_weights + we_idx);\n+\n+ vfloat unquant_valuef = int_to_float(unquant_value);\n+ storea(unquant_valuef, uq_pl_weightsf + we_idx);\n}\n// For each weight compute previous, current, and next errors\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -132,8 +132,8 @@ void unpack_weights(\nint weights_plane2[BLOCK_MAX_TEXELS]\n) {\n// First, unquantize the weights ...\n- int uq_plane1_weights[BLOCK_MAX_WEIGHTS];\n- int uq_plane2_weights[BLOCK_MAX_WEIGHTS];\n+ alignas(ASTCENC_VECALIGN) int uq_plane1_weights[BLOCK_MAX_WEIGHTS];\n+ alignas(ASTCENC_VECALIGN) int uq_plane2_weights[BLOCK_MAX_WEIGHTS];\nunsigned int weight_count = di.weight_count;\nconst quantization_and_transfer_table *qat = &(quant_and_xfer_tables[quant_level]);\n@@ -142,9 +142,11 @@ void unpack_weights(\n// Safe to overshoot as all arrays are allocated to full size\nif (!is_dual_plane)\n{\n- for (unsigned int i = 0; i < weight_count; i++)\n+ for (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n- uq_plane1_weights[i] = qat->unquantized_value[scb.weights[i]];\n+ vint quant_value(scb.weights + i);\n+ vint unquant_value = gatheri(qat->unquantized_value, quant_value);\n+ storea(unquant_value, uq_plane1_weights + i);\n}\nfor (unsigned int i = 0; i < bsd.texel_count; i += ASTCENC_SIMD_WIDTH)\n@@ -154,12 +156,18 @@ void unpack_weights(\n}\nelse\n{\n- for (unsigned int i = 0; i < weight_count; i++)\n+ for (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n- uq_plane1_weights[i] = qat->unquantized_value[scb.weights[i]];\n- uq_plane2_weights[i] = qat->unquantized_value[scb.weights[i + WEIGHTS_PLANE2_OFFSET]];\n+ vint quant_value1(scb.weights + i);\n+ vint unquant_value1 = gatheri(qat->unquantized_value, quant_value1);\n+ storea(unquant_value1, uq_plane1_weights + i);\n+\n+ vint quant_value2(scb.weights + i + WEIGHTS_PLANE2_OFFSET);\n+ vint unquant_value2 = gatheri(qat->unquantized_value, quant_value2);\n+ storea(unquant_value2, uq_plane2_weights + i);\n}\n+ // TODO: Scope for merging this into a single pass sharing \"di\" data?\nfor (unsigned int i = 0; i < bsd.texel_count; i += ASTCENC_SIMD_WIDTH)\n{\nstore(compute_value_of_texel_weight_int_vla(i, di, uq_plane1_weights), weights_plane1 + i);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": 
"Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -1060,10 +1060,13 @@ void recompute_ideal_colors_1plane(\nconst quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\n- float dec_weight[BLOCK_MAX_WEIGHTS];\n- for (unsigned int i = 0; i < weight_count; i++)\n+ alignas(ASTCENC_VECALIGN) float dec_weight[BLOCK_MAX_WEIGHTS];\n+ for (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n- dec_weight[i] = qat.unquantized_value[dec_weights_quant_pvalue[i]] * (1.0f / 64.0f);\n+ vint quant_value(dec_weights_quant_pvalue + i);\n+ vint unquant_value = gatheri(qat.unquantized_value, quant_value);\n+ vfloat unquant_valuef = int_to_float(unquant_value) * vfloat(1.0f / 64.0f);\n+ storea(unquant_valuef, dec_weight + i);\n}\nalignas(ASTCENC_VECALIGN) float undec_weight[BLOCK_MAX_TEXELS];\n@@ -1280,16 +1283,25 @@ void recompute_ideal_colors_2planes(\npromise(total_texel_count > 0);\npromise(weight_count > 0);\n- const quantization_and_transfer_table *qat = &(quant_and_xfer_tables[weight_quant_mode]);\n+ const quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\n- float dec_weight_plane1[BLOCK_MAX_WEIGHTS_2PLANE];\n- float dec_weight_plane2[BLOCK_MAX_WEIGHTS_2PLANE];\n+ alignas(ASTCENC_VECALIGN) float dec_weight_plane1[BLOCK_MAX_WEIGHTS_2PLANE];\n+ alignas(ASTCENC_VECALIGN) float dec_weight_plane2[BLOCK_MAX_WEIGHTS_2PLANE];\nassert(weight_count <= BLOCK_MAX_WEIGHTS_2PLANE);\n- for (unsigned int i = 0; i < weight_count; i++)\n+\n+ for (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n- dec_weight_plane1[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane1[i]] * (1.0f / 64.0f);\n- dec_weight_plane2[i] = qat->unquantized_value[dec_weights_quant_pvalue_plane2[i]] * (1.0f / 64.0f);\n+ vint quant_value1(dec_weights_quant_pvalue_plane1 + i);\n+ vint unquant_value1 = gatheri(qat.unquantized_value, quant_value1);\n+ vfloat unquant_value1f = int_to_float(unquant_value1) * vfloat(1.0f / 64.0f);\n+ storea(unquant_value1f, dec_weight_plane1 + i);\n+\n+ vint quant_value2(dec_weights_quant_pvalue_plane2 + i);\n+ vint unquant_value2 = gatheri(qat.unquantized_value, quant_value2);\n+ vfloat unquant_value2f = int_to_float(unquant_value2) * vfloat(1.0f / 64.0f);\n+ storea(unquant_value2f, dec_weight_plane2 + i);\n+\n}\nalignas(ASTCENC_VECALIGN) float undec_weight_plane1[BLOCK_MAX_TEXELS];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1191,7 +1191,7 @@ struct quantization_and_transfer_table\nint32_t scramble_map[32];\n/** @brief The scrambled unquantized values. */\n- uint8_t unquantized_value[32];\n+ int32_t unquantized_value[32];\n/**\n* @brief A table of previous-and-next weights, indexed by the current unquantized value.\n@@ -1203,7 +1203,6 @@ struct quantization_and_transfer_table\nuint32_t prev_next_values[65];\n};\n-\n/** @brief The precomputed quant and transfer table. */\nextern const quantization_and_transfer_table quant_and_xfer_tables[12];\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Vectorize unquant weight creation
61,745
07.05.2022 21:58:48
-3,600
a0bbf12daddbbc6078202eef9ea8d76e13eeb294
Don't use "unsigned" without "int"
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -340,7 +340,7 @@ ASTCENC_SIMD_INLINE vmask8 operator~(vmask8 a)\n*\n* bit0 = lane 0\n*/\n-ASTCENC_SIMD_INLINE unsigned mask(vmask8 a)\n+ASTCENC_SIMD_INLINE unsigned int mask(vmask8 a)\n{\nreturn _mm256_movemask_ps(a.m);\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Don't use "unsigned" without "int"
61,745
07.05.2022 22:16:46
-3,600
ccbf22f2f298ef89eef7fad1c1348813f1f6b2fa
Use references to quant_and_xfer tables
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -79,7 +79,7 @@ static bool realign_weights_undecimated(\n// Get the quantization table\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\nunsigned int weight_quant_level = bm.quant_mode;\n- const quantization_and_transfer_table *qat = &(quant_and_xfer_tables[weight_quant_level]);\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_level];\nunsigned int max_plane = bm.is_dual_plane;\nint plane2_component = bm.is_dual_plane ? scb.plane2_component : -1;\n@@ -126,9 +126,9 @@ static bool realign_weights_undecimated(\npromise(bsd.texel_count > 0);\nfor (unsigned int texel = 0; texel < bsd.texel_count; texel++)\n{\n- int uqw = qat->unquantized_value[dec_weights_quant_pvalue[texel]];\n+ int uqw = qat.unquantized_value[dec_weights_quant_pvalue[texel]];\n- uint32_t prev_and_next = qat->prev_next_values[uqw];\n+ uint32_t prev_and_next = qat.prev_next_values[uqw];\nint prev_wt_uq = prev_and_next & 0xFF;\nint next_wt_uq = (prev_and_next >> 8) & 0xFF;\n@@ -201,7 +201,7 @@ static bool realign_weights_decimated(\n// Get the quantization table\nconst block_mode& bm = bsd.get_block_mode(scb.block_mode);\nunsigned int weight_quant_level = bm.quant_mode;\n- const quantization_and_transfer_table *qat = &(quant_and_xfer_tables[weight_quant_level]);\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_level];\n// Get the decimation table\nconst decimation_info& di = bsd.get_decimation_info(bm.decimation_mode);\n@@ -258,7 +258,7 @@ static bool realign_weights_decimated(\n{\nvint quant_value(dec_weights_quant_pvalue + we_idx);\n- vint unquant_value = gatheri(qat->unquantized_value, quant_value);\n+ vint unquant_value = gatheri(qat.unquantized_value, quant_value);\nstorea(unquant_value, uq_pl_weights + we_idx);\nvfloat unquant_valuef = int_to_float(unquant_value);\n@@ -271,7 +271,7 @@ static bool realign_weights_decimated(\nunsigned int uqw = uq_pl_weights[we_idx];\nfloat uqwf = uq_pl_weightsf[we_idx];\n- uint32_t prev_and_next = qat->prev_next_values[uqw];\n+ uint32_t prev_and_next = qat.prev_next_values[uqw];\nunsigned int prev_wt_uq = prev_and_next & 0xFF;\nunsigned int next_wt_uq = (prev_and_next >> 8) & 0xFF;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_decompress_symbolic.cpp", "new_path": "Source/astcenc_decompress_symbolic.cpp", "diff": "@@ -136,7 +136,7 @@ void unpack_weights(\nalignas(ASTCENC_VECALIGN) int uq_plane2_weights[BLOCK_MAX_WEIGHTS];\nunsigned int weight_count = di.weight_count;\n- const quantization_and_transfer_table *qat = &(quant_and_xfer_tables[quant_level]);\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[quant_level];\n// Second, undecimate the weights ...\n// Safe to overshoot as all arrays are allocated to full size\n@@ -145,7 +145,7 @@ void unpack_weights(\nfor (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\nvint quant_value(scb.weights + i);\n- vint unquant_value = gatheri(qat->unquantized_value, quant_value);\n+ vint unquant_value = gatheri(qat.unquantized_value, quant_value);\nstorea(unquant_value, uq_plane1_weights + i);\n}\n@@ -159,11 +159,11 @@ void unpack_weights(\nfor (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\nvint quant_value1(scb.weights + i);\n- vint unquant_value1 = gatheri(qat->unquantized_value, quant_value1);\n+ vint unquant_value1 = gatheri(qat.unquantized_value, 
quant_value1);\nstorea(unquant_value1, uq_plane1_weights + i);\nvint quant_value2(scb.weights + i + WEIGHTS_PLANE2_OFFSET);\n- vint unquant_value2 = gatheri(qat->unquantized_value, quant_value2);\n+ vint unquant_value2 = gatheri(qat.unquantized_value, quant_value2);\nstorea(unquant_value2, uq_plane2_weights + i);\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -918,7 +918,7 @@ void compute_quantized_weights_for_decimation(\n) {\nint weight_count = di.weight_count;\npromise(weight_count > 0);\n- const quantization_and_transfer_table *qat = &(quant_and_xfer_tables[quant_level]);\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[quant_level];\n// The available quant levels, stored with a minus 1 bias\nstatic const float quant_levels_m1[12] {\n@@ -961,8 +961,8 @@ void compute_quantized_weights_for_decimation(\nvint weightl = float_to_int(ix1);\nvint weighth = weightl + vint(1);\n- vfloat ixl = gatherf(qat->unquantized_value_unsc, weightl);\n- vfloat ixh = gatherf(qat->unquantized_value_unsc, weighth);\n+ vfloat ixl = gatherf(qat.unquantized_value_unsc, weightl);\n+ vfloat ixh = gatherf(qat.unquantized_value_unsc, weighth);\nvmask mask = (ixl + ixh) < (vfloat(128.0f) * ix);\nvint weight = select(weightl, weighth, mask);\n@@ -970,7 +970,7 @@ void compute_quantized_weights_for_decimation(\n// Invert the weight-scaling that was done initially\nstorea(ixl * rscalev + low_boundv, &weight_set_out[i]);\n- vint scm = gatheri(qat->scramble_map, weight);\n+ vint scm = gatheri(qat.scramble_map, weight);\nvint scn = pack_low_bytes(scm);\nstore_nbytes(scn, &quantized_weight_set[i]);\n}\n@@ -1058,7 +1058,7 @@ void recompute_ideal_colors_1plane(\npromise(total_texel_count > 0);\npromise(partition_count > 0);\n- const quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\nalignas(ASTCENC_VECALIGN) float dec_weight[BLOCK_MAX_WEIGHTS];\nfor (unsigned int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n@@ -1283,7 +1283,7 @@ void recompute_ideal_colors_2planes(\npromise(total_texel_count > 0);\npromise(weight_count > 0);\n- const quantization_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\n+ const quant_and_transfer_table& qat = quant_and_xfer_tables[weight_quant_mode];\nalignas(ASTCENC_VECALIGN) float dec_weight_plane1[BLOCK_MAX_WEIGHTS_2PLANE];\nalignas(ASTCENC_VECALIGN) float dec_weight_plane2[BLOCK_MAX_WEIGHTS_2PLANE];\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1179,7 +1179,7 @@ struct dt_init_working_buffers\n* floating-point weight. For each quantized weight, the corresponding unquantized values. For each\n* quantized weight, a previous-value and a next-value.\n*/\n-struct quantization_and_transfer_table\n+struct quant_and_transfer_table\n{\n/** @brief The quantization level used */\nquant_method method;\n@@ -1204,7 +1204,7 @@ struct quantization_and_transfer_table\n};\n/** @brief The precomputed quant and transfer table. */\n-extern const quantization_and_transfer_table quant_and_xfer_tables[12];\n+extern const quant_and_transfer_table quant_and_xfer_tables[12];\n/** @brief The block is an error block, and will return error color or NaN. 
*/\nstatic constexpr uint8_t SYM_BTYPE_ERROR { 0 };\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_quant_xfer_tables.cpp", "new_path": "Source/astcenc_weight_quant_xfer_tables.cpp", "diff": "#define _ 0 // Using _ to indicate an entry that will not be used.\n-const quantization_and_transfer_table quant_and_xfer_tables[12] {\n+const quant_and_transfer_table quant_and_xfer_tables[12] {\n// Quantization method 0, range 0..1\n{\nQUANT_2,\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use references to quant_and_xfer tables
61,745
07.05.2022 22:31:44
-3,600
032cf324659915f362d8702d7ea8d309012912bc
Consistently use "base+off" rather than "&base[off]"
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_averages_and_directions.cpp", "new_path": "Source/astcenc_averages_and_directions.cpp", "diff": "@@ -822,7 +822,7 @@ void compute_error_squared_rgba(\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n{\nvmask mask = lane_ids < vint(texel_count);\n- vint texel_idxs(&(texel_indexes[i]));\n+ vint texel_idxs(texel_indexes + i);\nvfloat data_r = gatherf(blk.data_r, texel_idxs);\nvfloat data_g = gatherf(blk.data_g, texel_idxs);\n@@ -961,7 +961,7 @@ void compute_error_squared_rgb(\nfor (unsigned int i = 0; i < texel_count; i += ASTCENC_SIMD_WIDTH)\n{\nvmask mask = lane_ids < vint(texel_count);\n- vint texel_idxs(&(texel_indexes[i]));\n+ vint texel_idxs(texel_indexes + i);\nvfloat data_r = gatherf(blk.data_r, texel_idxs);\nvfloat data_g = gatherf(blk.data_g, texel_idxs);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -952,7 +952,7 @@ void compute_quantized_weights_for_decimation(\n// safe data in compute_ideal_weights_for_decimation and arrays are always 64 elements\nfor (int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\n- vfloat ix = loada(&dec_weight_ideal_value[i]) * scalev - scaled_low_boundv;\n+ vfloat ix = loada(dec_weight_ideal_value + i) * scalev - scaled_low_boundv;\nix = clampzo(ix);\n// Look up the two closest indexes and return the one that was closest\n@@ -969,10 +969,10 @@ void compute_quantized_weights_for_decimation(\nixl = select(ixl, ixh, mask);\n// Invert the weight-scaling that was done initially\n- storea(ixl * rscalev + low_boundv, &weight_set_out[i]);\n+ storea(ixl * rscalev + low_boundv, weight_set_out + i);\nvint scm = gatheri(qat.scramble_map, weight);\nvint scn = pack_low_bytes(scm);\n- store_nbytes(scn, &quantized_weight_set[i]);\n+ store_nbytes(scn, quantized_weight_set + i);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -1316,7 +1316,7 @@ unsigned int compute_ideal_endpoint_formats(\nvint lane_ids = vint::lane_id() + vint(start_block_mode);\nfor (unsigned int j = start_block_mode; j < end_block_mode; j += ASTCENC_SIMD_WIDTH)\n{\n- vfloat err = vfloat(&errors_of_best_combination[j]);\n+ vfloat err = vfloat(errors_of_best_combination + j);\nvmask mask1 = err < vbest_ep_error;\nvmask mask2 = vint(reinterpret_cast<int*>(best_quant_levels + j)) > vint(4);\nvmask mask = mask1 & mask2;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_weight_align.cpp", "new_path": "Source/astcenc_weight_align.cpp", "diff": "@@ -174,11 +174,11 @@ static void compute_lowest_and_highest_weight(\nvfloat errval = vfloat::zero();\nvfloat cut_low_weight_err = vfloat::zero();\nvfloat cut_high_weight_err = vfloat::zero();\n- vfloat offset = loada(&offsets[sp]);\n+ vfloat offset = loada(offsets + sp);\nfor (unsigned int j = 0; j < weight_count; ++j)\n{\n- vfloat sval = load1(&dec_weight_ideal_value[j]) * rcp_stepsize - offset;\n+ vfloat sval = load1(dec_weight_ideal_value + j) * rcp_stepsize - offset;\nvfloat svalrte = round(sval);\nvfloat diff = sval - svalrte;\nerrval += diff * diff;\n@@ -208,16 +208,16 @@ static void compute_lowest_and_highest_weight(\nvint span = float_to_int(maxidx - minidx + vfloat(1));\nspan = min(span, vint(max_quant_steps + 3));\nspan = max(span, vint(2));\n- storea(minidx, &lowest_weight[sp]);\n- storea(span, &weight_span[sp]);\n+ 
storea(minidx, lowest_weight + sp);\n+ storea(span, weight_span + sp);\n// The cut_(lowest/highest)_weight_error indicate the error that results from forcing\n// samples that should have had the weight value one step (up/down).\nvfloat ssize = 1.0f / rcp_stepsize;\nvfloat errscale = ssize * ssize;\n- storea(errval * errscale, &error[sp]);\n- storea(cut_low_weight_err * errscale, &cut_low_weight_error[sp]);\n- storea(cut_high_weight_err * errscale, &cut_high_weight_error[sp]);\n+ storea(errval * errscale, error + sp);\n+ storea(cut_low_weight_err * errscale, cut_low_weight_error + sp);\n+ storea(cut_high_weight_err * errscale, cut_high_weight_error + sp);\nrcp_stepsize = rcp_stepsize + vfloat(ASTCENC_SIMD_WIDTH);\n}\n@@ -370,11 +370,11 @@ static void compute_lowest_and_highest_weight_lwc(\nvfloat minidx(128.0f);\nvfloat maxidx(-128.0f);\nvfloat errval = vfloat::zero();\n- vfloat offset = loada(&offsets[sp]);\n+ vfloat offset = loada(offsets + sp);\nfor (unsigned int j = 0; j < weight_count; ++j)\n{\n- vfloat sval = load1(&dec_weight_quant_uvalue[j]) * rcp_stepsize - offset;\n+ vfloat sval = load1(dec_weight_quant_uvalue + j) * rcp_stepsize - offset;\nvfloat svalrte = round(sval);\nvfloat diff = sval - svalrte;\nerrval += diff * diff;\n@@ -388,14 +388,14 @@ static void compute_lowest_and_highest_weight_lwc(\nvint span = float_to_int(maxidx - minidx + vfloat(1.0f));\nspan = min(span, vint(max_quant_steps + 3));\nspan = max(span, vint(2));\n- storea(minidx, &lowest_weight[sp]);\n- storea(span, &weight_span[sp]);\n+ storea(minidx, lowest_weight + sp);\n+ storea(span, weight_span + sp);\n// The cut_(lowest/highest)_weight_error indicate the error that results from\n// forcing samples that should have had the weight value one step (up/down).\nvfloat ssize = 1.0f / rcp_stepsize;\nvfloat errscale = ssize * ssize;\n- storea(errval * errscale, &error[sp]);\n+ storea(errval * errscale, error + sp);\nrcp_stepsize = rcp_stepsize + vfloat(ASTCENC_SIMD_WIDTH);\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Consistently use "base+off" rather than "&base[off]"
61,745
09.05.2022 23:16:23
-3,600
39f5b01c32c8c7a7fec16fe295cad9db38771d60
Use load/store rather than fetch/write naming
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -842,7 +842,6 @@ static void compress_image(\n// Only the first thread actually runs the initializer\nctx.manage_compress.init(block_count);\n-\n// Determine if we can use an optimized load function\nbool needs_swz = (swizzle.r != ASTCENC_SWZ_R) || (swizzle.g != ASTCENC_SWZ_G) ||\n(swizzle.b != ASTCENC_SWZ_B) || (swizzle.a != ASTCENC_SWZ_A);\n@@ -853,10 +852,10 @@ static void compress_image(\nbool use_fast_load = !needs_swz && !needs_hdr &&\nblock_z == 1 && image.data_type == ASTCENC_TYPE_U8;\n- auto load_func = fetch_image_block;\n+ auto load_func = load_image_block;\nif (use_fast_load)\n{\n- load_func = fetch_image_block_fast_ldr;\n+ load_func = load_image_block_fast_ldr;\n}\n// All threads run this processing loop until there is no work remaining\n@@ -1143,7 +1142,7 @@ astcenc_error astcenc_decompress_image(\nx * block_x, y * block_y, z * block_z,\nscb, blk);\n- write_image_block(image_out, blk, *ctx->bsd,\n+ store_image_block(image_out, blk, *ctx->bsd,\nx * block_x, y * block_y, z * block_z, *swizzle);\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_image.cpp", "new_path": "Source/astcenc_image.cpp", "diff": "@@ -148,7 +148,7 @@ static vfloat4 encode_texel_lns(\n}\n/* See header for documentation. */\n-void fetch_image_block(\n+void load_image_block(\nastcenc_profile decode_mode,\nconst astcenc_image& img,\nimage_block& blk,\n@@ -265,7 +265,7 @@ void fetch_image_block(\n}\n/* See header for documentation. */\n-void fetch_image_block_fast_ldr(\n+void load_image_block_fast_ldr(\nastcenc_profile decode_mode,\nconst astcenc_image& img,\nimage_block& blk,\n@@ -332,7 +332,7 @@ void fetch_image_block_fast_ldr(\n}\n/* See header for documentation. 
*/\n-void write_image_block(\n+void store_image_block(\nastcenc_image& img,\nconst image_block& blk,\nconst block_size_descriptor& bsd,\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_internal.h", "new_path": "Source/astcenc_internal.h", "diff": "@@ -1761,7 +1761,7 @@ void compute_averages(\nconst avg_args& ag);\n/**\n- * @brief Fetch a single image block from the input image.\n+ * @brief Load a single image block from the input image.\n*\n* @param decode_mode The compression color profile.\n* @param img The input image data.\n@@ -1772,7 +1772,7 @@ void compute_averages(\n* @param zpos The block Z coordinate in the input image.\n* @param swz The swizzle to apply on load.\n*/\n-void fetch_image_block(\n+void load_image_block(\nastcenc_profile decode_mode,\nconst astcenc_image& img,\nimage_block& blk,\n@@ -1783,7 +1783,7 @@ void fetch_image_block(\nconst astcenc_swizzle& swz);\n/**\n- * @brief Fetch a single image block from the input image.\n+ * @brief Load a single image block from the input image.\n*\n* This specialized variant can be used only if the block is 2D LDR U8 data,\n* with no swizzle.\n@@ -1797,7 +1797,7 @@ void fetch_image_block(\n* @param zpos The block Z coordinate in the input image.\n* @param swz The swizzle to apply on load.\n*/\n-void fetch_image_block_fast_ldr(\n+void load_image_block_fast_ldr(\nastcenc_profile decode_mode,\nconst astcenc_image& img,\nimage_block& blk,\n@@ -1808,17 +1808,17 @@ void fetch_image_block_fast_ldr(\nconst astcenc_swizzle& swz);\n/**\n- * @brief Write a single image block from the output image.\n+ * @brief Store a single image block to the output image.\n*\n- * @param[out] img The input image data.\n- * @param blk The image block to populate.\n+ * @param[out] img The output image data.\n+ * @param blk The image block to export.\n* @param bsd The block size information.\n* @param xpos The block X coordinate in the input image.\n* @param ypos The block Y coordinate in the input image.\n* @param zpos The block Z coordinate in the input image.\n* @param swz The swizzle to apply on store.\n*/\n-void write_image_block(\n+void store_image_block(\nastcenc_image& img,\nconst image_block& blk,\nconst block_size_descriptor& bsd,\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use load/store rather than fetch/write naming
61,745
11.05.2022 23:43:24
-3,600
47991de2b0da90499c898035c88c63f9a953506e
Add -repeat option to CLI to support performance testing
[ { "change_type": "MODIFY", "old_path": "Source/astcenccli_internal.h", "new_path": "Source/astcenccli_internal.h", "diff": "@@ -68,6 +68,9 @@ struct cli_config_options\n/** @brief The number of threads to use for processing. */\nunsigned int thread_count;\n+ /** @brief The number of repeats to execute for benchmarking. */\n+ unsigned int repeat_count;\n+\n/** @brief The number of image slices to load for a 3D image. */\nunsigned int array_size;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenccli_toplevel.cpp", "new_path": "Source/astcenccli_toplevel.cpp", "diff": "@@ -948,6 +948,22 @@ static int edit_astcenc_config(\ncli_config.thread_count = atoi(argv[argidx - 1]);\n}\n+ else if (!strcmp(argv[argidx], \"-repeats\"))\n+ {\n+ argidx += 2;\n+ if (argidx > argc)\n+ {\n+ printf(\"ERROR: -repeats switch with no argument\\n\");\n+ return 1;\n+ }\n+\n+ cli_config.repeat_count = atoi(argv[argidx - 1]);\n+ if (cli_config.repeat_count <= 0)\n+ {\n+ printf(\"ERROR: -repeats value must be at least one\\n\");\n+ return 1;\n+ }\n+ }\nelse if (!strcmp(argv[argidx], \"-yflip\"))\n{\nargidx++;\n@@ -1369,7 +1385,6 @@ int main(\nbreak;\n}\n-\nstd::string input_filename = argc >= 3 ? argv[2] : \"\";\nstd::string output_filename = argc >= 4 ? argv[3] : \"\";\n@@ -1435,7 +1450,7 @@ int main(\n}\n// Initialize cli_config_options with default values\n- cli_config_options cli_config { 0, 1, false, false, -10, 10,\n+ cli_config_options cli_config { 0, 1, 1, false, false, -10, 10,\n{ ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A },\n{ ASTCENC_SWZ_R, ASTCENC_SWZ_G, ASTCENC_SWZ_B, ASTCENC_SWZ_A } };\n@@ -1555,8 +1570,6 @@ int main(\n}\n}\n- double start_coding_time = get_time();\n-\ndouble image_size = 0.0;\nif (image_uncomp_in)\n{\n@@ -1572,6 +1585,8 @@ int main(\n}\n// Compress an image\n+ double start_compression_time = 0.0;\n+ double end_compression_time = 0.0;\nif (operation & ASTCENC_STAGE_COMPRESS)\n{\nprint_astcenc_config(cli_config, config);\n@@ -1592,6 +1607,9 @@ int main(\n// Only launch worker threads for multi-threaded use - it makes basic\n// single-threaded profiling and debugging a little less convoluted\n+ start_compression_time = get_time();\n+ for (unsigned int i = 0; i < cli_config.repeat_count; i++)\n+ {\nif (cli_config.thread_count > 1)\n{\nlaunch_threads(cli_config.thread_count, compression_workload_runner, &work);\n@@ -1603,6 +1621,10 @@ int main(\nwork.data_out, work.data_len, 0);\n}\n+ astcenc_compress_reset(codec_context);\n+ }\n+ end_compression_time = get_time();\n+\nif (work.error != ASTCENC_SUCCESS)\n{\nprintf(\"ERROR: Codec compress failed: %s\\n\", astcenc_get_error_string(work.error));\n@@ -1620,6 +1642,8 @@ int main(\n}\n// Decompress an image\n+ double start_decompression_time = 0.0;\n+ double end_decompression_time = 0.0;\nif (operation & ASTCENC_STAGE_DECOMPRESS)\n{\nint out_bitness = get_output_filename_enforced_bitness(output_filename.c_str());\n@@ -1642,6 +1666,9 @@ int main(\n// Only launch worker threads for multi-threaded use - it makes basic\n// single-threaded profiling and debugging a little less convoluted\n+ start_decompression_time = get_time();\n+ for (unsigned int i = 0; i < cli_config.repeat_count; i++)\n+ {\nif (cli_config.thread_count > 1)\n{\nlaunch_threads(cli_config.thread_count, decompression_workload_runner, &work);\n@@ -1653,6 +1680,10 @@ int main(\nwork.image_out, &work.swizzle, 0);\n}\n+ astcenc_decompress_reset(codec_context);\n+ }\n+ end_decompression_time = get_time();\n+\nif (work.error != ASTCENC_SUCCESS)\n{\nprintf(\"ERROR: 
Codec decompress failed: %s\\n\", astcenc_get_error_string(codec_status));\n@@ -1660,8 +1691,6 @@ int main(\n}\n}\n- double end_coding_time = get_time();\n-\n// Print metrics in comparison mode\nif (operation & ASTCENC_STAGE_COMPARE)\n{\n@@ -1739,14 +1768,30 @@ int main(\nif ((operation & ASTCENC_STAGE_COMPARE) || (!cli_config.silentmode))\n{\ndouble end_time = get_time();\n- double tex_rate = image_size / (end_coding_time - start_coding_time);\n- tex_rate = tex_rate / 1000000.0;\n+\n+ double repeats = static_cast<double>(cli_config.repeat_count);\n+ double compression_time = (end_compression_time - start_compression_time) / repeats;\n+ double decompression_time = (end_decompression_time - start_decompression_time) / repeats;\n+ double total_time = (end_time - start_time) - ((repeats - 1.0) * compression_time) - ((repeats - 1.0) * decompression_time);\nprintf(\"Performance metrics\\n\");\nprintf(\"===================\\n\\n\");\n- printf(\" Total time: %8.4f s\\n\", end_time - start_time);\n- printf(\" Coding time: %8.4f s\\n\", end_coding_time - start_coding_time);\n- printf(\" Coding rate: %8.4f MT/s\\n\", tex_rate);\n+ printf(\" Total time: %8.4f s\\n\", total_time);\n+\n+ if (operation & ASTCENC_STAGE_COMPRESS)\n+ {\n+ double compression_rate = image_size / (compression_time * 1000000.0);\n+\n+ printf(\" Coding time: %8.4f s\\n\", compression_time);\n+ printf(\" Coding rate: %8.4f MT/s\\n\", compression_rate);\n+ }\n+\n+ if (operation & ASTCENC_STAGE_DECOMPRESS)\n+ {\n+ double decompression_rate = image_size / (decompression_time * 1000000.0);\n+ printf(\" Decoding time: %8.4f s\\n\", decompression_time);\n+ printf(\" Decoding rate: %8.4f MT/s\\n\", decompression_rate);\n+ }\n}\nreturn 0;\n" }, { "change_type": "MODIFY", "old_path": "Test/astc_test_image.py", "new_path": "Test/astc_test_image.py", "diff": "@@ -269,7 +269,7 @@ def get_encoder_params(encoderName, referenceName, imageSet):\n# Latest main\nif version == \"main\":\n- encoder = te.Encoder2x(simd)\n+ encoder = te.Encoder4x(simd)\nname = f\"reference-{version}-{simd}\"\noutDir = \"Test/Images/%s\" % imageSet\nrefName = None\n@@ -277,7 +277,7 @@ def get_encoder_params(encoderName, referenceName, imageSet):\nassert False, f\"Encoder {encoderName} not recognized\"\n- encoder = te.Encoder2x(encoderName)\n+ encoder = te.Encoder4x(encoderName)\nname = \"develop-%s\" % encoderName\noutDir = \"TestOutput/%s\" % imageSet\nrefName = referenceName.replace(\"ref\", \"reference\")\n" }, { "change_type": "MODIFY", "old_path": "Test/testlib/encoder.py", "new_path": "Test/testlib/encoder.py", "diff": "@@ -46,6 +46,7 @@ class EncoderBase():\nVERSION = None\nSWITCHES = None\nOUTPUTS = None\n+ HAS_REPEATS = False\ndef __init__(self, name, variant, binary):\n\"\"\"\n@@ -210,6 +211,12 @@ class EncoderBase():\n# pylint: disable=assignment-from-no-return\ncommand = self.build_cli(image, blockSize, preset, keepOutput, threads)\n+ # Inline repeats if the compressor supports it\n+ if self.HAS_REPEATS:\n+ command.append(\"-repeats\")\n+ command.append(f\"{testRuns}\")\n+ testRuns = 1\n+\n# Execute test runs\nbestPSNR = 0\nbestTTime = sys.float_info.max\n@@ -232,7 +239,6 @@ class EncoderBase():\nclass Encoder2x(EncoderBase):\n\"\"\"\nThis class wraps the latest `astcenc` 2.x series binaries from main branch.\n- branch.\n\"\"\"\nVERSION = \"main\"\n@@ -335,6 +341,30 @@ class Encoder2xRel(Encoder2x):\nsuper().__init__(variant, binary)\n+class Encoder4x(Encoder2x):\n+ \"\"\"\n+ This class wraps the latest `astcenc` 4.x series binaries from main branch.\n+ 
\"\"\"\n+ HAS_REPEATS = True\n+\n+\n+class Encoder4xRel(Encoder4x):\n+ \"\"\"\n+ This class wraps a released 4.x series binary.\n+ \"\"\"\n+\n+ def __init__(self, version, variant):\n+\n+ self.VERSION = version\n+\n+ if os.name == 'nt':\n+ binary = f\"./Binaries/{version}/astcenc-{variant}.exe\"\n+ else:\n+ binary = f\"./Binaries/{version}/astcenc-{variant}\"\n+\n+ super().__init__(variant, binary)\n+\n+\nclass Encoder1_7(EncoderBase):\n\"\"\"\nThis class wraps the 1.7 series binaries.\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add -repeat option to CLI to support performance testing (#334)
61,745
11.05.2022 23:46:59
-3,600
82d4c70e5bc0e10ff67f4fc3043ba54cf169fcf5
Increase version to 4.0.0
[ { "change_type": "MODIFY", "old_path": "CMakeLists.txt", "new_path": "CMakeLists.txt", "diff": "@@ -24,7 +24,7 @@ if(MSVC)\nadd_compile_options(\"/wd4324\") # Disable structure was padded due to alignment specifier\nendif()\n-project(astcencoder VERSION 3.7.0)\n+project(astcencoder VERSION 4.0.0)\nset(CMAKE_CXX_STANDARD 14)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Increase version to 4.0.0
61,745
11.05.2022 23:56:02
-3,600
a9bb8c033ff083b212d2aecf9ba462df1a3316c8
Add 4.0 change log
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -58,6 +58,9 @@ from 0.89 bits/pixel up to 8 bits/pixel.\nRelease build binaries for the `astcenc` stable releases are provided in the\n[GitHub Releases page][3].\n+**Latest 4.x stable release:** TBD\n+* Change log: [4.x series](./Docs/ChangeLog-4x.md)\n+\n**Latest 3.x stable release:** 3.7\n* Change log: [3.x series](./Docs/ChangeLog-3x.md)\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add 4.0 change log
61,745
13.05.2022 19:04:28
-3,600
91bfec2d32fc042ada25f547f3a6927a51985a8d
Early out same-endpoint color mode on first fail
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -556,7 +556,8 @@ static float compress_symbolic_block_for_partition_1plane(\nblk, pi, di, workscb.weights,\nworkep, rgbs_colors, rgbo_colors);\n- // Quantize the chosen color\n+ // Quantize the chosen color, tracking if worth trying the mod value\n+ bool all_same = color_quant_level[i] != color_quant_level_mod[i];\nfor (unsigned int j = 0; j < partition_count; j++)\n{\nworkscb.color_formats[j] = pack_color_endpoints(\n@@ -567,20 +568,19 @@ static float compress_symbolic_block_for_partition_1plane(\npartition_format_specifiers[i][j],\nworkscb.color_values[j],\ncolor_quant_level[i]);\n+\n+ all_same = all_same && workscb.color_formats[j] == workscb.color_formats[0];\n}\n// If all the color endpoint modes are the same, we get a few more bits to store colors;\n// let's see if we can take advantage of this: requantize all the colors and see if the\n// endpoint modes remain the same.\nworkscb.color_formats_matched = 0;\n-\n- if ((partition_count >= 2 && workscb.color_formats[0] == workscb.color_formats[1]\n- && color_quant_level[i] != color_quant_level_mod[i])\n- && (partition_count == 2 || (workscb.color_formats[0] == workscb.color_formats[2]\n- && (partition_count == 3 || (workscb.color_formats[0] == workscb.color_formats[3])))))\n+ if (partition_count >= 2 && all_same)\n{\nuint8_t colorvals[BLOCK_MAX_PARTITIONS][12];\nuint8_t color_formats_mod[BLOCK_MAX_PARTITIONS] { 0 };\n+ bool all_same_mod = true;\nfor (unsigned int j = 0; j < partition_count; j++)\n{\ncolor_formats_mod[j] = pack_color_endpoints(\n@@ -591,11 +591,16 @@ static float compress_symbolic_block_for_partition_1plane(\npartition_format_specifiers[i][j],\ncolorvals[j],\ncolor_quant_level_mod[i]);\n+\n+ // Early out as soon as it's no longer possible to use mod\n+ if (color_formats_mod[j] != color_formats_mod[0])\n+ {\n+ all_same_mod = false;\n+ break;\n+ }\n}\n- if (color_formats_mod[0] == color_formats_mod[1]\n- && (partition_count == 2 || (color_formats_mod[0] == color_formats_mod[2]\n- && (partition_count == 3 || (color_formats_mod[0] == color_formats_mod[3])))))\n+ if (all_same_mod)\n{\nworkscb.color_formats_matched = 1;\nfor (unsigned int j = 0; j < BLOCK_MAX_PARTITIONS; j++)\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Early out same-endpoint color mode on first fail
61,745
13.05.2022 19:04:42
-3,600
b9d55db4f6d62865ca2ed04164c9fa3b61920210
Valgrind wrapper prints totals
[ { "change_type": "MODIFY", "old_path": "Test/astc_profile_valgrind.py", "new_path": "Test/astc_profile_valgrind.py", "diff": "@@ -82,13 +82,19 @@ def postprocess_cga(logfile, outfile):\nfunction[2] *= 100.0\nwith open(outfile, \"w\") as fileHandle:\n+\n+ totals = 0.0\nfor function in functionTable:\n# Omit entries less than 1% load\nif function[2] < 1:\nbreak\n+ totals += function[2]\nfileHandle.write(\"%5.2f%% %s\\n\" % (function[2], function[0]))\n+ fileHandle.write(\"======\\n\")\n+ fileHandle.write(f\"{totals:5.2f}%\\n\")\n+\ndef run_pass(image, noStartup, encoder, blocksize, quality):\n\"\"\"\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Valgrind wrapper prints totals
61,745
13.05.2022 19:33:14
-3,600
12f38c1c48274e6274b54388d4b2b0bad04854e3
Change -repeats to keep best result
[ { "change_type": "MODIFY", "old_path": "Source/astcenccli_toplevel.cpp", "new_path": "Source/astcenccli_toplevel.cpp", "diff": "@@ -1585,8 +1585,8 @@ int main(\n}\n// Compress an image\n- double start_compression_time = 0.0;\n- double end_compression_time = 0.0;\n+ double best_compression_time = 100000.0;\n+ double total_compression_time = 0.0;\nif (operation & ASTCENC_STAGE_COMPRESS)\n{\nprint_astcenc_config(cli_config, config);\n@@ -1607,9 +1607,10 @@ int main(\n// Only launch worker threads for multi-threaded use - it makes basic\n// single-threaded profiling and debugging a little less convoluted\n- start_compression_time = get_time();\n+ double start_compression_time = get_time();\nfor (unsigned int i = 0; i < cli_config.repeat_count; i++)\n{\n+ double start_iter_time = get_time();\nif (cli_config.thread_count > 1)\n{\nlaunch_threads(cli_config.thread_count, compression_workload_runner, &work);\n@@ -1622,8 +1623,11 @@ int main(\n}\nastcenc_compress_reset(codec_context);\n+\n+ double iter_time = get_time() - start_iter_time;\n+ best_compression_time = astc::min(iter_time, best_compression_time);\n}\n- end_compression_time = get_time();\n+ total_compression_time = get_time() - start_compression_time;\nif (work.error != ASTCENC_SUCCESS)\n{\n@@ -1642,8 +1646,8 @@ int main(\n}\n// Decompress an image\n- double start_decompression_time = 0.0;\n- double end_decompression_time = 0.0;\n+ double best_decompression_time = 100000.0;\n+ double total_decompression_time = 0.0;\nif (operation & ASTCENC_STAGE_DECOMPRESS)\n{\nint out_bitness = get_output_filename_enforced_bitness(output_filename.c_str());\n@@ -1666,9 +1670,10 @@ int main(\n// Only launch worker threads for multi-threaded use - it makes basic\n// single-threaded profiling and debugging a little less convoluted\n- start_decompression_time = get_time();\n+ double start_decompression_time = get_time();\nfor (unsigned int i = 0; i < cli_config.repeat_count; i++)\n{\n+ double start_iter_time = get_time();\nif (cli_config.thread_count > 1)\n{\nlaunch_threads(cli_config.thread_count, decompression_workload_runner, &work);\n@@ -1681,8 +1686,11 @@ int main(\n}\nastcenc_decompress_reset(codec_context);\n+\n+ double iter_time = get_time() - start_iter_time;\n+ best_decompression_time = astc::min(iter_time, best_decompression_time);\n}\n- end_decompression_time = get_time();\n+ total_decompression_time = get_time() - start_decompression_time;\nif (work.error != ASTCENC_SUCCESS)\n{\n@@ -1770,9 +1778,9 @@ int main(\ndouble end_time = get_time();\ndouble repeats = static_cast<double>(cli_config.repeat_count);\n- double compression_time = (end_compression_time - start_compression_time) / repeats;\n- double decompression_time = (end_decompression_time - start_decompression_time) / repeats;\n- double total_time = (end_time - start_time) - ((repeats - 1.0) * compression_time) - ((repeats - 1.0) * decompression_time);\n+ double avg_compression_time = total_compression_time / repeats;\n+ double avg_decompression_time = total_decompression_time / repeats;\n+ double total_time = (end_time - start_time) - ((repeats - 1.0) * avg_compression_time) - ((repeats - 1.0) * avg_decompression_time);\nprintf(\"Performance metrics\\n\");\nprintf(\"===================\\n\\n\");\n@@ -1780,16 +1788,16 @@ int main(\nif (operation & ASTCENC_STAGE_COMPRESS)\n{\n- double compression_rate = image_size / (compression_time * 1000000.0);\n+ double compression_rate = image_size / (best_compression_time * 1000000.0);\n- printf(\" Coding time: %8.4f s\\n\", compression_time);\n+ 
printf(\" Coding time: %8.4f s\\n\", best_compression_time);\nprintf(\" Coding rate: %8.4f MT/s\\n\", compression_rate);\n}\nif (operation & ASTCENC_STAGE_DECOMPRESS)\n{\n- double decompression_rate = image_size / (decompression_time * 1000000.0);\n- printf(\" Decoding time: %8.4f s\\n\", decompression_time);\n+ double decompression_rate = image_size / (best_decompression_time * 1000000.0);\n+ printf(\" Decoding time: %8.4f s\\n\", best_decompression_time);\nprintf(\" Decoding rate: %8.4f MT/s\\n\", decompression_rate);\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Change -repeats to keep best result
61,745
13.05.2022 22:27:14
-3,600
ba734c0e7c19514439cada76427fcd2cd9c9bad7
Re-add -a alpha error scaling based on block max alpha
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -654,7 +654,7 @@ astcenc_error astcenc_config_init(\n//\n// ... but we scale these up to keep a better balance between color and alpha. Note\n// that if the content is using alpha we'd recommend using the -a option to weight\n- // the color conribution by the alpha transparency.\n+ // the color contribution by the alpha transparency.\nif (flags & ASTCENC_FLG_USE_PERCEPTUAL)\n{\nconfig.cw_r_weight = 0.30f * 2.25f;\n@@ -916,6 +916,18 @@ static void compress_image(\nif (use_full_block)\n{\nload_func(decode_mode, image, blk, bsd, x * block_x, y * block_y, z * block_z, swizzle);\n+\n+ // Scale RGB error contribution by the maximum alpha in the block\n+ // This encourages preserving alpha accuracy in regions with high\n+ // transparency, and can buy up to 0.5 dB PSNR.\n+ if (ctx.config.flags & ASTCENC_FLG_USE_ALPHA_WEIGHT)\n+ {\n+ float alpha_scale = blk.data_max.lane<3>() * (1.0f / 65535.0f);\n+ blk.channel_weight = vfloat4(ctx.config.cw_r_weight * alpha_scale,\n+ ctx.config.cw_g_weight * alpha_scale,\n+ ctx.config.cw_b_weight * alpha_scale,\n+ ctx.config.cw_a_weight);\n+ }\n}\n// Apply alpha scale RDO - substitute constant color block\nelse\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Re-add -a alpha error scaling based on block max alpha
61,745
17.05.2022 19:03:55
-3,600
d9f3b9df5ab21e3ba3381f8cd88db14357953283
Reduce -fastest quality level. IQ is still higher than 3.7, but this re-extends the range of the cost-quality curve without any appreciable change in the shape of the curve.
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -62,7 +62,7 @@ struct astcenc_preset_config\nstatic const std::array<astcenc_preset_config, 5> preset_configs_high {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 14, 44, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n+ 2, 10, 42, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n}, {\nASTCENC_PRE_FAST,\n3, 14, 55, 3, 3, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.1f, 0.65f, 20\n@@ -85,7 +85,7 @@ static const std::array<astcenc_preset_config, 5> preset_configs_high {{\nstatic const std::array<astcenc_preset_config, 5> preset_configs_mid {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 14, 43, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n+ 2, 10, 42, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n}, {\nASTCENC_PRE_FAST,\n3, 15, 55, 3, 3, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.1f, 0.5f, 16\n@@ -109,7 +109,7 @@ static const std::array<astcenc_preset_config, 5> preset_configs_mid {{\nstatic const std::array<astcenc_preset_config, 5> preset_configs_low {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 14, 42, 2, 2, 85.0f, 63.0f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n+ 2, 10, 40, 2, 2, 85.0f, 63.0f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n}, {\nASTCENC_PRE_FAST,\n2, 15, 55, 3, 3, 85.0f, 63.0f, 3.5f, 3.5f, 1.0f, 1.1f, 0.5f, 16\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Reduce -fastest quality level. IQ is still higher than 3.7, but this re-extends the range of the cost-quality curve without any appreciable change in the shape of the curve.
61,745
18.05.2022 08:31:12
-3,600
12c75dae3a868129434996f5876e293302513de9
Cleanup realign weights
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -129,41 +129,39 @@ static bool realign_weights_undecimated(\nint uqw = dec_weights_uquant[texel];\nuint32_t prev_and_next = qat.prev_next_values[uqw];\n- int prev_wt_uq = prev_and_next & 0xFF;\n- int next_wt_uq = (prev_and_next >> 8) & 0xFF;\n+ int uqw_down = prev_and_next & 0xFF;\n+ int uqw_up = (prev_and_next >> 8) & 0xFF;\n// Interpolate the colors to create the diffs\n- unsigned int partition = pi.partition_of_texel[texel];\n-\n- float plane_weight = static_cast<float>(uqw);\n- float plane_up_weight = static_cast<float>(next_wt_uq - uqw);\n- float plane_down_weight = static_cast<float>(prev_wt_uq - uqw);\n+ float weight_base = static_cast<float>(uqw);\n+ float weight_down = static_cast<float>(uqw_down - uqw);\n+ float weight_up = static_cast<float>(uqw_up - uqw);\n+ unsigned int partition = pi.partition_of_texel[texel];\nvfloat4 color_offset = offset[partition];\nvfloat4 color_base = endpnt0f[partition];\n- vfloat4 color = color_base + color_offset * plane_weight;\n-\n+ vfloat4 color = color_base + color_offset * weight_base;\nvfloat4 orig_color = blk.texel(texel);\nvfloat4 error_weight = blk.channel_weight;\nvfloat4 color_diff = color - orig_color;\n- vfloat4 color_up_diff = color_diff + color_offset * plane_up_weight;\n- vfloat4 color_down_diff = color_diff + color_offset * plane_down_weight;\n+ vfloat4 color_diff_down = color_diff + color_offset * weight_down;\n+ vfloat4 color_diff_up = color_diff + color_offset * weight_up;\n- float current_error = dot_s(color_diff * color_diff, error_weight);\n- float up_error = dot_s(color_up_diff * color_up_diff, error_weight);\n- float down_error = dot_s(color_down_diff * color_down_diff, error_weight);\n+ float error_base = dot_s(color_diff * color_diff, error_weight);\n+ float error_down = dot_s(color_diff_down * color_diff_down, error_weight);\n+ float error_up = dot_s(color_diff_up * color_diff_up, error_weight);\n// Check if the prev or next error is better, and if so use it\n- if ((up_error < current_error) && (up_error < down_error) && (uqw < 64))\n+ if ((error_up < error_base) && (error_up < error_down) && (uqw < 64))\n{\n- dec_weights_uquant[texel] = next_wt_uq;\n+ dec_weights_uquant[texel] = uqw_up;\nadjustments = true;\n}\n- else if ((down_error < current_error) && (uqw > 0))\n+ else if ((error_down < error_base) && (uqw > 0))\n{\n- dec_weights_uquant[texel] = prev_wt_uq;\n+ dec_weights_uquant[texel] = uqw_down;\nadjustments = true;\n}\n}\n@@ -234,8 +232,6 @@ static bool realign_weights_decimated(\nendpnt1[pa_idx]);\n}\n- alignas(ASTCENC_VECALIGN) float uq_pl_weightsf[BLOCK_MAX_WEIGHTS];\n-\nuint8_t* dec_weights_uquant = scb.weights;\nbool adjustments = false;\n@@ -253,29 +249,30 @@ static bool realign_weights_decimated(\n}\n// Create an unquantized weight grid for this decimation level\n+ alignas(ASTCENC_VECALIGN) float uq_weightsf[BLOCK_MAX_WEIGHTS];\nfor (unsigned int we_idx = 0; we_idx < weight_count; we_idx += ASTCENC_SIMD_WIDTH)\n{\nvint unquant_value(dec_weights_uquant + we_idx);\nvfloat unquant_valuef = int_to_float(unquant_value);\n- storea(unquant_valuef, uq_pl_weightsf + we_idx);\n+ storea(unquant_valuef, uq_weightsf + we_idx);\n}\n// For each weight compute previous, current, and next errors\nfor (unsigned int we_idx = 0; we_idx < weight_count; we_idx++)\n{\nint uqw = dec_weights_uquant[we_idx];\n- float uqwf = uq_pl_weightsf[we_idx];\n-\nuint32_t prev_and_next = 
qat.prev_next_values[uqw];\n- unsigned int prev_wt_uq = prev_and_next & 0xFF;\n- unsigned int next_wt_uq = (prev_and_next >> 8) & 0xFF;\n- float uqw_next_dif = static_cast<float>(next_wt_uq) - uqwf;\n- float uqw_prev_dif = static_cast<float>(prev_wt_uq) - uqwf;\n+ float uqw_base = uq_weightsf[we_idx];\n+ float uqw_down = static_cast<float>(prev_and_next & 0xFF);\n+ float uqw_up = static_cast<float>((prev_and_next >> 8) & 0xFF);\n- vfloat4 current_errorv = vfloat4::zero();\n- vfloat4 up_errorv = vfloat4::zero();\n- vfloat4 down_errorv = vfloat4::zero();\n+ float uqw_diff_down = uqw_down - uqw_base;\n+ float uqw_diff_up = uqw_up - uqw_base;\n+\n+ vfloat4 error_basev = vfloat4::zero();\n+ vfloat4 error_downv = vfloat4::zero();\n+ vfloat4 error_upv = vfloat4::zero();\n// Interpolate the colors to create the diffs\nunsigned int texels_to_evaluate = di.weight_texel_count[we_idx];\n@@ -283,60 +280,56 @@ static bool realign_weights_decimated(\nfor (unsigned int te_idx = 0; te_idx < texels_to_evaluate; te_idx++)\n{\nunsigned int texel = di.weight_texel[te_idx][we_idx];\n- float weight_base = uqwf;\nconst uint8_t *texel_weights = di.texel_weights_texel[we_idx][te_idx];\nconst float *texel_weights_float = di.texel_weights_float_texel[we_idx][te_idx];\n- float twf0 = texel_weights_float[0];\n- weight_base = (uqwf * twf0\n- + uq_pl_weightsf[texel_weights[1]] * texel_weights_float[1])\n- + (uq_pl_weightsf[texel_weights[2]] * texel_weights_float[2]\n- + uq_pl_weightsf[texel_weights[3]] * texel_weights_float[3]);\n+ float tw_base = texel_weights_float[0];\n- unsigned int partition = pi.partition_of_texel[texel];\n+ float weight_base = (uqw_base * tw_base\n+ + uq_weightsf[texel_weights[1]] * texel_weights_float[1])\n+ + (uq_weightsf[texel_weights[2]] * texel_weights_float[2]\n+ + uq_weightsf[texel_weights[3]] * texel_weights_float[3]);\n// Ideally this is integer rounded, but IQ gain it isn't worth the overhead\n- // float plane_weight = astc::flt_rd(weight_base + 0.5f);\n- // float plane_up_weight = astc::flt_rd(weight_base + 0.5f + uqw_next_dif * twf0) - plane_weight;\n- // float plane_down_weight = astc::flt_rd(weight_base + 0.5f + uqw_prev_dif * twf0) - plane_weight;\n-\n- float plane_weight = weight_base;\n- float plane_up_weight = weight_base + uqw_next_dif * twf0 - plane_weight;\n- float plane_down_weight = weight_base + uqw_prev_dif * twf0 - plane_weight;\n+ // float weight = astc::flt_rd(weight_base + 0.5f);\n+ // float weight_down = astc::flt_rd(weight_base + 0.5f + uqw_diff_down * tw_base) - weight;\n+ // float weight_up = astc::flt_rd(weight_base + 0.5f + uqw_diff_up * tw_base) - weight;\n+ float weight_down = weight_base + uqw_diff_down * tw_base - weight_base;\n+ float weight_up = weight_base + uqw_diff_up * tw_base - weight_base;\n+ unsigned int partition = pi.partition_of_texel[texel];\nvfloat4 color_offset = offset[partition];\nvfloat4 color_base = endpnt0f[partition];\n- vfloat4 color = color_base + color_offset * plane_weight;\n-\n+ vfloat4 color = color_base + color_offset * weight_base;\nvfloat4 orig_color = blk.texel(texel);\nvfloat4 color_diff = color - orig_color;\n- vfloat4 color_up_diff = color_diff + color_offset * plane_up_weight;\n- vfloat4 color_down_diff = color_diff + color_offset * plane_down_weight;\n+ vfloat4 color_down_diff = color_diff + color_offset * weight_down;\n+ vfloat4 color_up_diff = color_diff + color_offset * weight_up;\n- current_errorv += color_diff * color_diff;\n- up_errorv += color_up_diff * color_up_diff;\n- down_errorv += color_down_diff * 
color_down_diff;\n+ error_basev += color_diff * color_diff;\n+ error_downv += color_down_diff * color_down_diff;\n+ error_upv += color_up_diff * color_up_diff;\n}\nvfloat4 error_weight = blk.channel_weight;\n- float current_error = hadd_s(current_errorv * error_weight);\n- float up_error = hadd_s(up_errorv * error_weight);\n- float down_error = hadd_s(down_errorv * error_weight);\n+ float error_base = hadd_s(error_basev * error_weight);\n+ float error_down = hadd_s(error_downv * error_weight);\n+ float error_up = hadd_s(error_upv * error_weight);\n// Check if the prev or next error is better, and if so use it\n- if ((up_error < current_error) && (up_error < down_error) && (uqw < 64))\n+ if ((error_up < error_base) && (error_up < error_down) && (uqw < 64))\n{\n- uq_pl_weightsf[we_idx] = static_cast<float>(next_wt_uq);\n- dec_weights_uquant[we_idx] = next_wt_uq;\n+ uq_weightsf[we_idx] = uqw_up;\n+ dec_weights_uquant[we_idx] = uqw_up;\nadjustments = true;\n}\n- else if ((down_error < current_error) && (uqw > 0))\n+ else if ((error_down < error_base) && (uqw > 0))\n{\n- uq_pl_weightsf[we_idx] = static_cast<float>(prev_wt_uq);\n- dec_weights_uquant[we_idx] = prev_wt_uq;\n+ uq_weightsf[we_idx] = uqw_down;\n+ dec_weights_uquant[we_idx] = uqw_down;\nadjustments = true;\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Cleanup realign weights
61,745
30.05.2022 09:52:26
-3,600
e7f6502c04efc8209265c96de2f75c6f816b678b
Add explicit static casts
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -323,13 +323,13 @@ static bool realign_weights_decimated(\nif ((error_up < error_base) && (error_up < error_down) && (uqw < 64))\n{\nuq_weightsf[we_idx] = uqw_up;\n- dec_weights_uquant[we_idx] = uqw_up;\n+ dec_weights_uquant[we_idx] = static_cast<uint8_t>(uqw_up);\nadjustments = true;\n}\nelse if ((error_down < error_base) && (uqw > 0))\n{\nuq_weightsf[we_idx] = uqw_down;\n- dec_weights_uquant[we_idx] = uqw_down;\n+ dec_weights_uquant[we_idx] = static_cast<uint8_t>(uqw_down);\nadjustments = true;\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add explicit static casts
61,745
03.06.2022 23:30:42
-3,600
f749b59f0bc97c8acd5415f3b662297c615cbab0
Use smaller vtable when possible
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "new_path": "Source/astcenc_ideal_endpoints_and_weights.cpp", "diff": "@@ -949,13 +949,48 @@ void compute_quantized_weights_for_decimation(\nvfloat rscalev(rscale);\nvfloat low_boundv(low_bound);\n+ // This runs to the rounded-up SIMD size, which is safe as the loop tail is filled with known\n+ // safe data in compute_ideal_weights_for_decimation and arrays are always 64 elements\n+ if (get_quant_level(quant_level) <= 16)\n+ {\n+ vint4 tab0(reinterpret_cast<const int*>(qat.quant_to_unquant));\n+ vint tab0p;\n+ vtable_prepare(tab0, tab0p);\n+\n+ for (int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n+ {\n+ vfloat ix = loada(dec_weight_ideal_value + i) * scalev - scaled_low_boundv;\n+ ix = clampzo(ix);\n+\n+ // Look up the two closest indexes and return the one that was closest\n+ vfloat ix1 = ix * quant_level_m1v;\n+\n+ vint weightl = float_to_int(ix1);\n+ vint weighth = min(weightl + vint(1), steps_m1);\n+\n+ vint ixli = vtable_8bt_32bi(tab0p, weightl);\n+ vint ixhi = vtable_8bt_32bi(tab0p, weighth);\n+\n+ vfloat ixl = int_to_float(ixli);\n+ vfloat ixh = int_to_float(ixhi);\n+\n+ vmask mask = (ixl + ixh) < (vfloat(128.0f) * ix);\n+ vint weight = select(ixli, ixhi, mask);\n+ ixl = select(ixl, ixh, mask);\n+\n+ // Invert the weight-scaling that was done initially\n+ storea(ixl * rscalev + low_boundv, weight_set_out + i);\n+ vint scn = pack_low_bytes(weight);\n+ store_nbytes(scn, quantized_weight_set + i);\n+ }\n+ }\n+ else\n+ {\nvint4 tab0(reinterpret_cast<const int*>(qat.quant_to_unquant));\nvint4 tab1(reinterpret_cast<const int*>(qat.quant_to_unquant + 16));\nvint tab0p, tab1p;\nvtable_prepare(tab0, tab1, tab0p, tab1p);\n- // This runs to the rounded-up SIMD size, which is safe as the loop tail is filled with known\n- // safe data in compute_ideal_weights_for_decimation and arrays are always 64 elements\nfor (int i = 0; i < weight_count; i += ASTCENC_SIMD_WIDTH)\n{\nvfloat ix = loada(dec_weight_ideal_value + i) * scalev - scaled_low_boundv;\n@@ -983,6 +1018,7 @@ void compute_quantized_weights_for_decimation(\nstore_nbytes(scn, quantized_weight_set + i);\n}\n}\n+}\n/**\n* @brief Compute the RGB + offset for a HDR endpoint mode #7.\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -1004,6 +1004,16 @@ ASTCENC_SIMD_INLINE vfloat8 int_as_float(vint8 a)\nreturn vfloat8(_mm256_castsi256_ps(a.m));\n}\n+/**\n+ * @brief Prepare a vtable lookup table for use with the native SIMD size.\n+ */\n+ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint8& t0p)\n+{\n+ // AVX2 duplicates the table within each 128-bit lane\n+ __m128i t0n = t0.m;\n+ t0p = vint8(_mm256_set_m128i(t0n, t0n));\n+}\n+\n/**\n* @brief Prepare a vtable lookup table for use with the native SIMD size.\n*/\n@@ -1038,6 +1048,18 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\nt3p = vint8(_mm256_set_m128i(t3n, t3n));\n}\n+/**\n+ * @brief Perform an 8-bit 16-entry table lookup, with 32-bit indexes.\n+ */\n+ASTCENC_SIMD_INLINE vint8 vtable_8bt_32bi(vint8 t0, vint8 idx)\n+{\n+ // Set index byte MSB to 1 for unused bytes so shuffle returns zero\n+ __m256i idxx = _mm256_or_si256(idx.m, _mm256_set1_epi32(0xFFFFFF00));\n+\n+ __m256i result = _mm256_shuffle_epi8(t0.m, idxx);\n+ return vint8(result);\n+}\n+\n/**\n* @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.\n*/\n" }, { "change_type": "MODIFY", "old_path": 
"Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -924,6 +924,15 @@ ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 v)\nreturn vfloat4(vreinterpretq_f32_s32(v.m));\n}\n+/**\n+ * @brief Prepare a vtable lookup table for use with the native SIMD size.\n+ */\n+ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4& t0p)\n+{\n+ t0p = t0;\n+}\n+\n+\n/**\n* @brief Prepare a vtable lookup table for use with the native SIMD size.\n*/\n@@ -946,6 +955,20 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\nt3p = t3;\n}\n+/**\n+ * @brief Perform an 8-bit 16-entry table lookup, with 32-bit indexes.\n+ */\n+ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)\n+{\n+ int8x16x2_t table { t0.m };\n+\n+ // Set index byte MSB to 1 for unused bytes so shuffle returns zero\n+ int32x4_t idx_masked = vorrq_s32(idx.m, vdupq_n_s32(0xFFFFFF00));\n+ int8x16_t idx_bytes= vreinterpretq_u8_s32(idx_masked);\n+\n+ return vint4(vqtbl1q_s8(table, idx_bytes));\n+}\n+\n/**\n* @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -1043,6 +1043,14 @@ ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 a)\nreturn r;\n}\n+/**\n+ * @brief Prepare a vtable lookup table for use with the native SIMD size.\n+ */\n+ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4& t0p)\n+{\n+ t0p = t0;\n+}\n+\n/**\n* @brief Prepare a vtable lookup table for use with the native SIMD size.\n*/\n@@ -1065,6 +1073,21 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\nt3p = t3;\n}\n+/**\n+ * @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.\n+ */\n+ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)\n+{\n+ uint8_t table[16];\n+ storea(t0, reinterpret_cast<int*>(table + 0));\n+\n+ return vint4(table[idx.lane<0>()],\n+ table[idx.lane<1>()],\n+ table[idx.lane<2>()],\n+ table[idx.lane<3>()]);\n+}\n+\n+\n/**\n* @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1025,6 +1025,14 @@ ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 v)\nreturn vfloat4(_mm_castsi128_ps(v.m));\n}\n+/**\n+ * @brief Prepare a vtable lookup table for use with the native SIMD size.\n+ */\n+ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4& t0p)\n+{\n+ t0p = t0;\n+}\n+\n/**\n* @brief Prepare a vtable lookup table for use with the native SIMD size.\n*/\n@@ -1059,6 +1067,28 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\n#endif\n}\n+/**\n+ * @brief Perform an 8-bit 16-entry table lookup, with 32-bit indexes.\n+ */\n+ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)\n+{\n+#if ASTCENC_SSE >= 30\n+ // Set index byte MSB to 1 for unused bytes so shuffle returns zero\n+ __m128i idxx = _mm_or_si128(idx.m, _mm_set1_epi32(0xFFFFFF00));\n+\n+ __m128i result = _mm_shuffle_epi8(t0.m, idxx);\n+ return vint4(result);\n+#else\n+ alignas(ASTCENC_VECALIGN) uint8_t table[16];\n+ storea(t0, reinterpret_cast<int*>(table + 0));\n+\n+ return vint4(table[idx.lane<0>()],\n+ table[idx.lane<1>()],\n+ table[idx.lane<2>()],\n+ table[idx.lane<3>()]);\n+#endif\n+}\n+\n/**\n* @brief Perform an 8-bit 32-entry table lookup, with 32-bit indexes.\n*/\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Use smaller vtable when possible
61,745
03.06.2022 23:35:11
-3,600
f053748fa1dbde391b1c1dbc7bbeceb38ac2fc6c
Fix small NEON vtable
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -960,7 +960,7 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\n*/\nASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)\n{\n- int8x16x2_t table { t0.m };\n+ int8x16_t table { t0.m };\n// Set index byte MSB to 1 for unused bytes so shuffle returns zero\nint32x4_t idx_masked = vorrq_s32(idx.m, vdupq_n_s32(0xFFFFFF00));\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix small NEON vtable
61,745
04.06.2022 08:55:19
-3,600
8263924ee0c19f346f3f3f3a8368dd68e920c7ff
Wrap _mm256_set_m128i for GCC 7.x
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "#include <cstdio>\n+// Define convenience intrinsics that are missing on older compilers\n+#define astcenc_mm256_set_m128i(m, n) _mm256_insertf128_si256(_mm256_castsi128_si256((n)), (m), 1)\n+\n// ============================================================================\n// vfloat8 data type\n// ============================================================================\n@@ -503,10 +506,7 @@ ASTCENC_SIMD_INLINE vint8 hmin(vint8 a)\nm = _mm_min_epi32(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(0,0,0,1)));\nm = _mm_shuffle_epi32(m, _MM_SHUFFLE(0,0,0,0));\n- // This is the most logical implementation, but the convenience intrinsic\n- // is missing on older compilers (supported in g++ 9 and clang++ 9).\n- //__m256i r = _mm256_set_m128i(m, m)\n- __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(m), m, 1);\n+ __m256i r = astcenc_mm256_set_m128i(m, m);\nvint8 vmin(r);\nreturn vmin;\n}\n@@ -521,10 +521,7 @@ ASTCENC_SIMD_INLINE vint8 hmax(vint8 a)\nm = _mm_max_epi32(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(0,0,0,1)));\nm = _mm_shuffle_epi32(m, _MM_SHUFFLE(0,0,0,0));\n- // This is the most logical implementation, but the convenience intrinsic\n- // is missing on older compilers (supported in g++ 9 and clang++ 9).\n- //__m256i r = _mm256_set_m128i(m, m)\n- __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(m), m, 1);\n+ __m256i r = astcenc_mm256_set_m128i(m, m);\nvint8 vmax(r);\nreturn vmax;\n}\n@@ -578,10 +575,7 @@ ASTCENC_SIMD_INLINE vint8 pack_low_bytes(vint8 v)\n__m128i a1 = _mm256_extracti128_si256(a, 1);\n__m128i b = _mm_unpacklo_epi32(a0, a1);\n- // This is the most logical implementation, but the convenience intrinsic\n- // is missing on older compilers (supported in g++ 9 and clang++ 9).\n- //__m256i r = _mm256_set_m128i(b, b)\n- __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(b), b, 1);\n+ __m256i r = astcenc_mm256_set_m128i(b, b);\nreturn vint8(r);\n}\n@@ -1011,7 +1005,7 @@ ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint8& t0p)\n{\n// AVX2 duplicates the table within each 128-bit lane\n__m128i t0n = t0.m;\n- t0p = vint8(_mm256_set_m128i(t0n, t0n));\n+ t0p = vint8(astcenc_mm256_set_m128i(t0n, t0n));\n}\n/**\n@@ -1021,10 +1015,10 @@ ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4 t1, vint8& t0p, vint8& t\n{\n// AVX2 duplicates the table within each 128-bit lane\n__m128i t0n = t0.m;\n- t0p = vint8(_mm256_set_m128i(t0n, t0n));\n+ t0p = vint8(astcenc_mm256_set_m128i(t0n, t0n));\n__m128i t1n = _mm_xor_si128(t0.m, t1.m);\n- t1p = vint8(_mm256_set_m128i(t1n, t1n));\n+ t1p = vint8(astcenc_mm256_set_m128i(t1n, t1n));\n}\n/**\n@@ -1036,16 +1030,16 @@ ASTCENC_SIMD_INLINE void vtable_prepare(\n{\n// AVX2 duplicates the table within each 128-bit lane\n__m128i t0n = t0.m;\n- t0p = vint8(_mm256_set_m128i(t0n, t0n));\n+ t0p = vint8(astcenc_mm256_set_m128i(t0n, t0n));\n__m128i t1n = _mm_xor_si128(t0.m, t1.m);\n- t1p = vint8(_mm256_set_m128i(t1n, t1n));\n+ t1p = vint8(astcenc_mm256_set_m128i(t1n, t1n));\n__m128i t2n = _mm_xor_si128(t1.m, t2.m);\n- t2p = vint8(_mm256_set_m128i(t2n, t2n));\n+ t2p = vint8(astcenc_mm256_set_m128i(t2n, t2n));\n__m128i t3n = _mm_xor_si128(t2.m, t3.m);\n- t3p = vint8(_mm256_set_m128i(t3n, t3n));\n+ t3p = vint8(astcenc_mm256_set_m128i(t3n, t3n));\n}\n/**\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Wrap _mm256_set_m128i for GCC 7.x
61,745
04.06.2022 09:41:11
-3,600
b7b74f87f7ed9a695bd0bae12e99b577225ee7d1
Enable more block modes for -fastest
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -62,7 +62,7 @@ struct astcenc_preset_config\nstatic const std::array<astcenc_preset_config, 5> preset_configs_high {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 10, 42, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n+ 2, 10, 43, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 25\n}, {\nASTCENC_PRE_FAST,\n3, 14, 55, 3, 3, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.1f, 0.65f, 20\n@@ -85,7 +85,7 @@ static const std::array<astcenc_preset_config, 5> preset_configs_high {{\nstatic const std::array<astcenc_preset_config, 5> preset_configs_mid {{\n{\nASTCENC_PRE_FASTEST,\n- 2, 10, 42, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n+ 2, 10, 43, 2, 2, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.0f, 0.5f, 20\n}, {\nASTCENC_PRE_FAST,\n3, 15, 55, 3, 3, 85.2f, 63.2f, 3.5f, 3.5f, 1.0f, 1.1f, 0.5f, 16\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Enable more block modes for -fastest
61,745
04.06.2022 16:58:03
-3,600
d1b241c7db1c64cd6f4f7c93d6d99f41f44873c2
Vectorize block store
[ { "change_type": "MODIFY", "old_path": "Source/UnitTest/test_simd.cpp", "new_path": "Source/UnitTest/test_simd.cpp", "diff": "@@ -1753,6 +1753,72 @@ TEST(vint4, store_nbytes)\nEXPECT_EQ(out, 42);\n}\n+/** @brief Test vint8 store_lanes_masked. */\n+TEST(vint4, store_lanes_masked)\n+{\n+ int resulta[4] { 0 };\n+\n+ // Store nothing\n+ vmask4 mask1 = vint4(0) == vint4(1);\n+ vint4 data1 = vint4(1);\n+\n+ store_lanes_masked(resulta, data1, mask1);\n+ vint4 result1v(resulta);\n+ vint4 expect1v = vint4::zero();\n+ EXPECT_TRUE(all(result1v == expect1v));\n+\n+ // Store half\n+ vmask4 mask2 = vint4(1, 1, 0, 0) == vint4(1);\n+ vint4 data2 = vint4(2);\n+\n+ store_lanes_masked(resulta, data2, mask2);\n+ vint4 result2v(resulta);\n+ vint4 expect2v = vint4(2, 2, 0, 0);\n+ EXPECT_TRUE(all(result2v == expect2v));\n+\n+ // Store all\n+ vmask4 mask3 = vint4(1) == vint4(1);\n+ vint4 data3 = vint4(3);\n+\n+ store_lanes_masked(resulta, data3, mask3);\n+ vint4 result3v(resulta);\n+ vint4 expect3v = vint4(3);\n+ EXPECT_TRUE(all(result3v == expect3v));\n+}\n+\n+/** @brief Test vint8 store_lanes_masked to unaligned address. */\n+TEST(vint4, store_lanes_masked_unaligned)\n+{\n+ int8_t resulta[17] { 0 };\n+\n+ // Store nothing\n+ vmask4 mask1 = vint4(0) == vint4(1);\n+ vint4 data1 = vint4(1);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data1, mask1);\n+ vint4 result1v(reinterpret_cast<int*>(resulta + 1));\n+ vint4 expect1v = vint4::zero();\n+ EXPECT_TRUE(all(result1v == expect1v));\n+\n+ // Store half\n+ vmask4 mask2 = vint4(1, 1, 0, 0) == vint4(1);\n+ vint4 data2 = vint4(2);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data2, mask2);\n+ vint4 result2v(reinterpret_cast<int*>(resulta + 1));\n+ vint4 expect2v = vint4(2, 2, 0, 0);\n+ EXPECT_TRUE(all(result2v == expect2v));\n+\n+ // Store all\n+ vmask4 mask3 = vint4(1) == vint4(1);\n+ vint4 data3 = vint4(3);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data3, mask3);\n+ vint4 result3v(reinterpret_cast<int*>(resulta + 1));\n+ vint4 expect3v = vint4(3);\n+ EXPECT_TRUE(all(result3v == expect3v));\n+}\n+\n/** @brief Test vint4 gatheri. */\nTEST(vint4, gatheri)\n{\n@@ -1928,6 +1994,22 @@ TEST(vint4, vtable_8bt_32bi_64entry)\nEXPECT_EQ(result.lane<3>(), 60);\n}\n+/** @brief Test vint4 rgba byte interleave. */\n+TEST(vint4, interleave_rgba8)\n+{\n+ vint4 r(0x01, 0x11, 0x21, 0x31);\n+ vint4 g(0x02, 0x12, 0x22, 0x32);\n+ vint4 b(0x03, 0x13, 0x23, 0x33);\n+ vint4 a(0x04, 0x14, 0x24, 0x34);\n+\n+ vint4 result = interleave_rgba8(r, g, b, a);\n+\n+ EXPECT_EQ(result.lane<0>(), 0x04030201);\n+ EXPECT_EQ(result.lane<1>(), 0x14131211);\n+ EXPECT_EQ(result.lane<2>(), 0x24232221);\n+ EXPECT_EQ(result.lane<3>(), 0x34333231);\n+}\n+\n# if ASTCENC_SIMD_WIDTH == 8\n// VFLOAT8 tests - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n@@ -3013,6 +3095,42 @@ TEST(vint8, max)\nEXPECT_EQ(r.lane<7>(), 5);\n}\n+/** @brief Test vint8 lsl. 
*/\n+TEST(vint8, lsl)\n+{\n+ vint8 a(1, 2, 4, -4, 1, 2, 4, -4);\n+ a = lsl<0>(a);\n+ EXPECT_EQ(a.lane<0>(), 1);\n+ EXPECT_EQ(a.lane<1>(), 2);\n+ EXPECT_EQ(a.lane<2>(), 4);\n+ EXPECT_EQ(a.lane<3>(), 0xFFFFFFFC);\n+ EXPECT_EQ(a.lane<4>(), 1);\n+ EXPECT_EQ(a.lane<5>(), 2);\n+ EXPECT_EQ(a.lane<6>(), 4);\n+ EXPECT_EQ(a.lane<7>(), 0xFFFFFFFC);\n+\n+\n+ a = lsl<1>(a);\n+ EXPECT_EQ(a.lane<0>(), 2);\n+ EXPECT_EQ(a.lane<1>(), 4);\n+ EXPECT_EQ(a.lane<2>(), 8);\n+ EXPECT_EQ(a.lane<3>(), 0xFFFFFFF8);\n+ EXPECT_EQ(a.lane<4>(), 2);\n+ EXPECT_EQ(a.lane<5>(), 4);\n+ EXPECT_EQ(a.lane<6>(), 8);\n+ EXPECT_EQ(a.lane<7>(), 0xFFFFFFF8);\n+\n+ a = lsl<2>(a);\n+ EXPECT_EQ(a.lane<0>(), 8);\n+ EXPECT_EQ(a.lane<1>(), 16);\n+ EXPECT_EQ(a.lane<2>(), 32);\n+ EXPECT_EQ(a.lane<3>(), 0xFFFFFFE0);\n+ EXPECT_EQ(a.lane<4>(), 8);\n+ EXPECT_EQ(a.lane<5>(), 16);\n+ EXPECT_EQ(a.lane<6>(), 32);\n+ EXPECT_EQ(a.lane<7>(), 0xFFFFFFE0);\n+}\n+\n/** @brief Test vint8 lsr. */\nTEST(vint8, lsr)\n{\n@@ -3179,6 +3297,72 @@ TEST(vint8, store_nbytes)\nEXPECT_EQ(out[1], 314);\n}\n+/** @brief Test vint8 store_lanes_masked. */\n+TEST(vint8, store_lanes_masked)\n+{\n+ int resulta[8] { 0 };\n+\n+ // Store nothing\n+ vmask8 mask1 = vint8(0) == vint8(1);\n+ vint8 data1 = vint8(1);\n+\n+ store_lanes_masked(resulta, data1, mask1);\n+ vint8 result1v(resulta);\n+ vint8 expect1v = vint8::zero();\n+ EXPECT_TRUE(all(result1v == expect1v));\n+\n+ // Store half\n+ vmask8 mask2 = vint8(1, 1, 1, 1, 0, 0, 0, 0) == vint8(1);\n+ vint8 data2 = vint8(2);\n+\n+ store_lanes_masked(resulta, data2, mask2);\n+ vint8 result2v(resulta);\n+ vint8 expect2v = vint8(2, 2, 2, 2, 0, 0, 0, 0);\n+ EXPECT_TRUE(all(result2v == expect2v));\n+\n+ // Store all\n+ vmask8 mask3 = vint8(1) == vint8(1);\n+ vint8 data3 = vint8(3);\n+\n+ store_lanes_masked(resulta, data3, mask3);\n+ vint8 result3v(resulta);\n+ vint8 expect3v = vint8(3);\n+ EXPECT_TRUE(all(result3v == expect3v));\n+}\n+\n+/** @brief Test vint8 store_lanes_masked to unaligned address. */\n+TEST(vint8, store_lanes_masked_unaligned)\n+{\n+ int8_t resulta[33] { 0 };\n+\n+ // Store nothing\n+ vmask8 mask1 = vint8(0) == vint8(1);\n+ vint8 data1 = vint8(1);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data1, mask1);\n+ vint8 result1v(reinterpret_cast<int*>(resulta + 1));\n+ vint8 expect1v = vint8::zero();\n+ EXPECT_TRUE(all(result1v == expect1v));\n+\n+ // Store half\n+ vmask8 mask2 = vint8(1, 1, 1, 1, 0, 0, 0, 0) == vint8(1);\n+ vint8 data2 = vint8(2);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data2, mask2);\n+ vint8 result2v(reinterpret_cast<int*>(resulta + 1));\n+ vint8 expect2v = vint8(2, 2, 2, 2, 0, 0, 0, 0);\n+ EXPECT_TRUE(all(result2v == expect2v));\n+\n+ // Store all\n+ vmask8 mask3 = vint8(1) == vint8(1);\n+ vint8 data3 = vint8(3);\n+\n+ store_lanes_masked(reinterpret_cast<int*>(resulta + 1), data3, mask3);\n+ vint8 result3v(reinterpret_cast<int*>(resulta + 1));\n+ vint8 expect3v = vint8(3);\n+ EXPECT_TRUE(all(result3v == expect3v));\n+}\n+\n/** @brief Test vint8 gatheri. 
*/\nTEST(vint8, gatheri)\n{\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_image.cpp", "new_path": "Source/astcenc_image.cpp", "diff": "@@ -341,24 +341,21 @@ void store_image_block(\nunsigned int zpos,\nconst astcenc_swizzle& swz\n) {\n- unsigned int xsize = img.dim_x;\n- unsigned int ysize = img.dim_y;\n- unsigned int zsize = img.dim_z;\n-\n+ unsigned int x_size = img.dim_x;\nunsigned int x_start = xpos;\n- unsigned int x_end = std::min(xsize, xpos + bsd.xdim);\n- unsigned int x_nudge = bsd.xdim - (x_end - x_start);\n+ unsigned int x_end = astc::min(x_size, xpos + bsd.xdim);\n+ unsigned int x_count = x_end - x_start;\n+ unsigned int x_nudge = bsd.xdim - x_count;\n+ unsigned int y_size = img.dim_y;\nunsigned int y_start = ypos;\n- unsigned int y_end = std::min(ysize, ypos + bsd.ydim);\n- unsigned int y_nudge = (bsd.ydim - (y_end - y_start)) * bsd.xdim;\n+ unsigned int y_end = astc::min(y_size, ypos + bsd.ydim);\n+ unsigned int y_count = y_end - y_start;\n+ unsigned int y_nudge = (bsd.ydim - y_count) * bsd.xdim;\n+ unsigned int z_size = img.dim_z;\nunsigned int z_start = zpos;\n- unsigned int z_end = std::min(zsize, zpos + bsd.zdim);\n-\n- float data[7];\n- data[ASTCENC_SWZ_0] = 0.0f;\n- data[ASTCENC_SWZ_1] = 1.0f;\n+ unsigned int z_end = astc::min(z_size, zpos + bsd.zdim);\n// True if any non-identity swizzle\nbool needs_swz = (swz.r != ASTCENC_SWZ_R) || (swz.g != ASTCENC_SWZ_G) ||\n@@ -378,47 +375,68 @@ void store_image_block(\nfor (unsigned int y = y_start; y < y_end; y++)\n{\n- for (unsigned int x = x_start; x < x_end; x++)\n- {\n- vint4 colori = vint4::zero();\n+ uint8_t* data8_row = data8 + (4 * x_size * y) + (4 * x_start);\n- // Errors are NaN encoded - convert to magenta error color\n- if (blk.data_r[idx] != blk.data_r[idx])\n+ for (unsigned int x = 0; x < x_count; x += ASTCENC_SIMD_WIDTH)\n{\n- colori = vint4(0xFF, 0x00, 0xFF, 0xFF);\n- }\n- else if (needs_swz)\n+ unsigned int max_texels = ASTCENC_SIMD_WIDTH;\n+ unsigned int used_texels = astc::min(x_count - x, max_texels);\n+\n+ // Unaligned load as rows are not always SIMD_WIDTH long\n+ vfloat data_r(blk.data_r + idx);\n+ vfloat data_g(blk.data_g + idx);\n+ vfloat data_b(blk.data_b + idx);\n+ vfloat data_a(blk.data_a + idx);\n+\n+ vint data_ri = float_to_int_rtn(min(data_r, 1.0f) * 255.0f);\n+ vint data_gi = float_to_int_rtn(min(data_g, 1.0f) * 255.0f);\n+ vint data_bi = float_to_int_rtn(min(data_b, 1.0f) * 255.0f);\n+ vint data_ai = float_to_int_rtn(min(data_a, 1.0f) * 255.0f);\n+\n+ if (needs_swz)\n{\n- data[ASTCENC_SWZ_R] = blk.data_r[idx];\n- data[ASTCENC_SWZ_G] = blk.data_g[idx];\n- data[ASTCENC_SWZ_B] = blk.data_b[idx];\n- data[ASTCENC_SWZ_A] = blk.data_a[idx];\n+ vint swizzle_table[7];\n+ swizzle_table[ASTCENC_SWZ_0] = vint(0);\n+ swizzle_table[ASTCENC_SWZ_1] = vint(255);\n+ swizzle_table[ASTCENC_SWZ_R] = data_ri;\n+ swizzle_table[ASTCENC_SWZ_G] = data_gi;\n+ swizzle_table[ASTCENC_SWZ_B] = data_bi;\n+ swizzle_table[ASTCENC_SWZ_A] = data_ai;\nif (needs_z)\n{\n- float xcoord = (data[0] * 2.0f) - 1.0f;\n- float ycoord = (data[3] * 2.0f) - 1.0f;\n- float zcoord = 1.0f - xcoord * xcoord - ycoord * ycoord;\n- if (zcoord < 0.0f)\n- {\n- zcoord = 0.0f;\n- }\n- data[ASTCENC_SWZ_Z] = (astc::sqrt(zcoord) * 0.5f) + 0.5f;\n+ vfloat data_x = (data_r * vfloat(2.0f)) - vfloat(1.0f);\n+ vfloat data_y = (data_a * vfloat(2.0f)) - vfloat(1.0f);\n+ vfloat data_z = vfloat(1.0f) - (data_x * data_x) - (data_y * data_y);\n+ data_z = max(data_z, 0.0f);\n+ data_z = (sqrt(data_z) * vfloat(0.5f)) + vfloat(0.5f);\n+\n+ 
swizzle_table[ASTCENC_SWZ_Z] = float_to_int_rtn(min(data_z, 1.0f) * 255.0f);\n}\n- vfloat4 color = vfloat4(data[swz.r], data[swz.g], data[swz.b], data[swz.a]);\n- colori = float_to_int_rtn(min(color, 1.0f) * 255.0f);\n+ data_ri = swizzle_table[swz.r];\n+ data_gi = swizzle_table[swz.g];\n+ data_bi = swizzle_table[swz.b];\n+ data_ai = swizzle_table[swz.a];\n}\n- else\n+\n+ // Errors are NaN encoded - convert to magenta error color\n+ // Branch is OK here - it is almost never true so predicts well\n+ vmask nan_mask = data_r != data_r;\n+ if (any(nan_mask))\n{\n- vfloat4 color = blk.texel(idx);\n- colori = float_to_int_rtn(min(color, 1.0f) * 255.0f);\n+ data_ri = select(data_ri, vint(0xFF), nan_mask);\n+ data_gi = select(data_gi, vint(0x00), nan_mask);\n+ data_bi = select(data_bi, vint(0xFF), nan_mask);\n+ data_ai = select(data_ai, vint(0xFF), nan_mask);\n}\n- colori = pack_low_bytes(colori);\n- store_nbytes(colori, data8 + (4 * xsize * y) + (4 * x ));\n+ vint data_rgbai = interleave_rgba8(data_ri, data_gi, data_bi, data_ai);\n+ vmask store_mask = vint::lane_id() < vint(used_texels);\n+ store_lanes_masked((int*)data8_row, data_rgbai, store_mask);\n- idx++;\n+ data8_row += ASTCENC_SIMD_WIDTH * 4;\n+ idx += used_texels;\n}\nidx += x_nudge;\n}\n@@ -434,13 +452,18 @@ void store_image_block(\nfor (unsigned int y = y_start; y < y_end; y++)\n{\n- for (unsigned int x = x_start; x < x_end; x++)\n+ uint16_t* data16_row = data16 + (4 * x_size * y) + (4 * x_start);\n+\n+ for (unsigned int x = 0; x < x_count; x++)\n{\nvint4 color;\n// NaNs are handled inline - no need to special case\nif (needs_swz)\n{\n+ float data[7];\n+ data[ASTCENC_SWZ_0] = 0.0f;\n+ data[ASTCENC_SWZ_1] = 1.0f;\ndata[ASTCENC_SWZ_R] = blk.data_r[idx];\ndata[ASTCENC_SWZ_G] = blk.data_g[idx];\ndata[ASTCENC_SWZ_B] = blk.data_b[idx];\n@@ -467,11 +490,12 @@ void store_image_block(\ncolor = float_to_float16(colorf);\n}\n- data16[(4 * xsize * y) + (4 * x )] = static_cast<uint16_t>(color.lane<0>());\n- data16[(4 * xsize * y) + (4 * x + 1)] = static_cast<uint16_t>(color.lane<1>());\n- data16[(4 * xsize * y) + (4 * x + 2)] = static_cast<uint16_t>(color.lane<2>());\n- data16[(4 * xsize * y) + (4 * x + 3)] = static_cast<uint16_t>(color.lane<3>());\n-\n+ // TODO: Vectorize with store N shorts?\n+ data16_row[0] = static_cast<uint16_t>(color.lane<0>());\n+ data16_row[1] = static_cast<uint16_t>(color.lane<1>());\n+ data16_row[2] = static_cast<uint16_t>(color.lane<2>());\n+ data16_row[3] = static_cast<uint16_t>(color.lane<3>());\n+ data16_row += 4;\nidx++;\n}\nidx += x_nudge;\n@@ -490,13 +514,18 @@ void store_image_block(\nfor (unsigned int y = y_start; y < y_end; y++)\n{\n- for (unsigned int x = x_start; x < x_end; x++)\n+ float* data32_row = data32 + (4 * x_size * y) + (4 * x_start);\n+\n+ for (unsigned int x = 0; x < x_count; x++)\n{\nvfloat4 color = blk.texel(idx);\n// NaNs are handled inline - no need to special case\nif (needs_swz)\n{\n+ float data[7];\n+ data[ASTCENC_SWZ_0] = 0.0f;\n+ data[ASTCENC_SWZ_1] = 1.0f;\ndata[ASTCENC_SWZ_R] = color.lane<0>();\ndata[ASTCENC_SWZ_G] = color.lane<1>();\ndata[ASTCENC_SWZ_B] = color.lane<2>();\n@@ -517,8 +546,8 @@ void store_image_block(\ncolor = vfloat4(data[swz.r], data[swz.g], data[swz.b], data[swz.a]);\n}\n- store(color, data32 + (4 * xsize * y) + (4 * x ));\n-\n+ store(color, data32_row);\n+ data32_row += 4;\nidx++;\n}\nidx += x_nudge;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -464,6 +464,14 @@ 
ASTCENC_SIMD_INLINE vmask8 operator>(vint8 a, vint8 b)\nreturn vmask8(_mm256_cmpgt_epi32(a.m, b.m));\n}\n+/**\n+ * @brief Logical shift left.\n+ */\n+template <int s> ASTCENC_SIMD_INLINE vint8 lsl(vint8 a)\n+{\n+ return vint8(_mm256_slli_epi32(a.m, s));\n+}\n+\n/**\n* @brief Arithmetic shift right.\n*/\n@@ -725,6 +733,16 @@ ASTCENC_SIMD_INLINE vfloat8 min(vfloat8 a, vfloat8 b)\nreturn vfloat8(_mm256_min_ps(a.m, b.m));\n}\n+/**\n+ * @brief Return the min vector of a vector and a scalar.\n+ *\n+ * If either lane value is NaN, @c b will be returned for that lane.\n+ */\n+ASTCENC_SIMD_INLINE vfloat8 min(vfloat8 a, float b)\n+{\n+ return min(a, vfloat8(b));\n+}\n+\n/**\n* @brief Return the max vector of two vectors.\n*\n@@ -735,6 +753,16 @@ ASTCENC_SIMD_INLINE vfloat8 max(vfloat8 a, vfloat8 b)\nreturn vfloat8(_mm256_max_ps(a.m, b.m));\n}\n+/**\n+ * @brief Return the max vector of a vector and a scalar.\n+ *\n+ * If either lane value is NaN, @c b will be returned for that lane.\n+ */\n+ASTCENC_SIMD_INLINE vfloat8 max(vfloat8 a, float b)\n+{\n+ return max(a, vfloat8(b));\n+}\n+\n/**\n* @brief Return the clamped value between min and max.\n*\n@@ -966,6 +994,16 @@ ASTCENC_SIMD_INLINE vint8 float_to_int(vfloat8 a)\nreturn vint8(_mm256_cvttps_epi32(a.m));\n}\n+/**\n+ * @brief Return a integer value for a float vector, using round-to-nearest.\n+ */\n+ASTCENC_SIMD_INLINE vint8 float_to_int_rtn(vfloat8 a)\n+{\n+ a = round(a);\n+ return vint8(_mm256_cvttps_epi32(a.m));\n+}\n+\n+\n/**\n* @brief Return a float value for an integer vector.\n*/\n@@ -1095,6 +1133,29 @@ ASTCENC_SIMD_INLINE vint8 vtable_8bt_32bi(vint8 t0, vint8 t1, vint8 t2, vint8 t3\nreturn vint8(result);\n}\n+/**\n+ * @brief Return a vector of interleaved RGBA data.\n+ *\n+ * Input vectors have the value stored in the bottom 8 bits of each lane,\n+ * with high bits set to zero.\n+ *\n+ * Output vector stores a single RGBA texel packed in each lane.\n+ */\n+ASTCENC_SIMD_INLINE vint8 interleave_rgba8(vint8 r, vint8 g, vint8 b, vint8 a)\n+{\n+ return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n+}\n+\n+/**\n+ * @brief Store a vector, skipping masked lanes.\n+ *\n+ * All masked lanes must be at the end of vector, after all non-masked lanes.\n+ */\n+ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint8 data, vmask8 mask)\n+{\n+ _mm256_maskstore_epi32(base, mask.m, data.m);\n+}\n+\n/**\n* @brief Debug function to print a vector of ints.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -283,7 +283,7 @@ struct vint4\n*/\nstatic ASTCENC_SIMD_INLINE vint4 lane_id()\n{\n- alignas(ASTCENC_VECALIGN) static const int data[4] { 0, 1, 2, 3 };\n+ alignas(16) static const int data[4] { 0, 1, 2, 3 };\nreturn vint4(vld1q_s32(data));\n}\n@@ -997,6 +997,48 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\nreturn vint4(vqtbl4q_s8(table, idx_bytes));\n}\n+/**\n+ * @brief Return a vector of interleaved RGBA data.\n+ *\n+ * Input vectors have the value stored in the bottom 8 bits of each lane,\n+ * with high bits set to zero.\n+ *\n+ * Output vector stores a single RGBA texel packed in each lane.\n+ */\n+ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n+{\n+ return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n+}\n+\n+/**\n+ * @brief Store a vector, skipping masked lanes.\n+ *\n+ * All masked lanes must be at the end of vector, after all non-masked lanes.\n+ */\n+ASTCENC_SIMD_INLINE void 
store_lanes_masked(int* base, vint4 data, vmask4 mask)\n+{\n+ if (mask.m[3])\n+ {\n+ store(data, base);\n+ }\n+ else if(mask.m[2])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ base[2] = data.lane<2>();\n+ }\n+ else if(mask.m[1])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ }\n+ else if(mask.m[0])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ }\n+}\n+\n#define ASTCENC_USE_NATIVE_POPCOUNT 1\n/**\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -1118,6 +1118,47 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\ntable[idx.lane<1>()],\ntable[idx.lane<2>()],\ntable[idx.lane<3>()]);\n+\n+/**\n+ * @brief Return a vector of interleaved RGBA data.\n+ *\n+ * Input vectors have the value stored in the bottom 8 bits of each lane,\n+ * with high bits set to zero.\n+ *\n+ * Output vector stores a single RGBA texel packed in each lane.\n+ */\n+ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n+{\n+ return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n+}\n+\n+/**\n+ * @brief Store a vector, skipping masked lanes.\n+ *\n+ * All masked lanes must be at the end of vector, after all non-masked lanes.\n+ */\n+ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)\n+{\n+ if (mask.m[3])\n+ {\n+ store(data, base);\n+ }\n+ else if(mask.m[2])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ base[2] = data.lane<2>();\n+ }\n+ else if(mask.m[1])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ }\n+ else if(mask.m[0])\n+ {\n+ base[0] = data.lane<0>();\n+ base[1] = data.lane<1>();\n+ }\n}\n#endif // #ifndef ASTC_VECMATHLIB_NONE_4_H_INCLUDED\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1155,6 +1155,36 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\n#endif\n}\n+/**\n+ * @brief Return a vector of interleaved RGBA data.\n+ *\n+ * Input vectors have the value stored in the bottom 8 bits of each lane,\n+ * with high bits set to zero.\n+ *\n+ * Output vector stores a single RGBA texel packed in each lane.\n+ */\n+ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n+{\n+ __m128i value = r.m;\n+ value = _mm_add_epi32(value, _mm_bslli_si128(g.m, 1));\n+ value = _mm_add_epi32(value, _mm_bslli_si128(b.m, 2));\n+ value = _mm_add_epi32(value, _mm_bslli_si128(a.m, 3));\n+ return vint4(value);\n+}\n+\n+/**\n+ * @brief Store a vector, skipping masked lanes.\n+ *\n+ * All masked lanes must be at the end of vector, after all non-masked lanes.\n+ */\n+ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)\n+{\n+#if ASTCENC_AVX >= 2\n+ _mm_maskstore_epi32(base, mask.m, data.m);\n+#else\n+ _mm_maskmoveu_si128(data.m, mask.m, (char*)base);\n+#endif\n+}\n#if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Vectorize block store (#337)
61,745
04.06.2022 19:24:28
-3,600
ede61daaca2bd18b3405a8252d017a35abe9bd87
Remove redundant quant_level mask
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_pick_best_endpoint_format.cpp", "new_path": "Source/astcenc_pick_best_endpoint_format.cpp", "diff": "@@ -1317,9 +1317,7 @@ unsigned int compute_ideal_endpoint_formats(\nfor (unsigned int j = start_block_mode; j < end_block_mode; j += ASTCENC_SIMD_WIDTH)\n{\nvfloat err = vfloat(errors_of_best_combination + j);\n- vmask mask1 = err < vbest_ep_error;\n- vmask mask2 = vint(reinterpret_cast<int*>(best_quant_levels + j)) > vint(4);\n- vmask mask = mask1 & mask2;\n+ vmask mask = err < vbest_ep_error;\nvbest_ep_error = select(vbest_ep_error, err, mask);\nvbest_error_index = select(vbest_error_index, lane_ids, mask);\nlane_ids += vint(ASTCENC_SIMD_WIDTH);\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Remove redundant quant_level mask
61,745
04.06.2022 20:58:38
-3,600
bddd48f9c85659393ebb9102697b5e76c68f9f88
Fix missing end brace in NONE builds
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -1118,6 +1118,7 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\ntable[idx.lane<1>()],\ntable[idx.lane<2>()],\ntable[idx.lane<3>()]);\n+}\n/**\n* @brief Return a vector of interleaved RGBA data.\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix missing end brace in NONE builds
61,745
05.06.2022 08:25:43
-3,600
1d2e8b842ca8fe4966392707cc04f10e8ddfccc4
Workaround MSVC arm64 codegen bug
[ { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -83,6 +83,16 @@ macro(astcenc_set_properties NAME)\n# MSVC defines\n$<$<CXX_COMPILER_ID:MSVC>:_CRT_SECURE_NO_WARNINGS>)\n+ # Work around compiler bug in MSVC when targeting arm64\n+ # https://developercommunity.visualstudio.com/t/inlining-turns-constant-into-register-operand-for/1394798\n+ # https://github.com/microsoft/vcpkg/pull/24869\n+ if(CMAKE_CXX_COMPILER_ID MATCHES \"MSVC\")\n+ if(CPU_ARCHITECTURE STREQUAL armv8 OR CPU_ARCHITECTURE STREQUAL arm64)\n+ set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} /d2ssa-cfg-sink-\")\n+ set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} /d2ssa-cfg-sink-\")\n+ endif()\n+ endif()\n+\nif(${DECOMPRESSOR})\ntarget_compile_definitions(${NAME}\nPRIVATE\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Workaround MSVC arm64 codegen bug
61,745
05.06.2022 08:38:19
-3,600
486e482584db76a639bec2f9e14fadc9b2151837
Add intrin type casts for MSVC
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -1153,7 +1153,7 @@ ASTCENC_SIMD_INLINE vint8 interleave_rgba8(vint8 r, vint8 g, vint8 b, vint8 a)\n*/\nASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint8 data, vmask8 mask)\n{\n- _mm256_maskstore_epi32(base, mask.m, data.m);\n+ _mm256_maskstore_epi32(base, _mm256_castps_si256(mask.m), data.m);\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1180,9 +1180,9 @@ ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\nASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)\n{\n#if ASTCENC_AVX >= 2\n- _mm_maskstore_epi32(base, mask.m, data.m);\n+ _mm_maskstore_epi32(base, _mm_castps_si128(mask.m), data.m);\n#else\n- _mm_maskmoveu_si128(data.m, mask.m, (char*)base);\n+ _mm_maskmoveu_si128(data.m, _mm_castps_si128(mask.m), (char*)base);\n#endif\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Add intrin type casts for MSVC
61,745
05.06.2022 10:35:49
-3,600
8a5e37eae7a581b420c660f80cbb78cd867b3314
Avoid _mm_bslli_si128 for VS2015 compat
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -295,6 +295,19 @@ ASTCENC_SIMD_INLINE float hadd_rgb_s(vfloat4 a)\nreturn a.lane<0>() + a.lane<1>() + a.lane<2>();\n}\n+/**\n+ * @brief Return a vector of interleaved RGBA data.\n+ *\n+ * Input vectors have the value stored in the bottom 8 bits of each lane,\n+ * with high bits set to zero.\n+ *\n+ * Output vector stores a single RGBA texel packed in each lane.\n+ */\n+ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n+{\n+ return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n+}\n+\n#if !defined(ASTCENC_USE_NATIVE_DOT_PRODUCT)\n/**\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -997,19 +997,6 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\nreturn vint4(vqtbl4q_s8(table, idx_bytes));\n}\n-/**\n- * @brief Return a vector of interleaved RGBA data.\n- *\n- * Input vectors have the value stored in the bottom 8 bits of each lane,\n- * with high bits set to zero.\n- *\n- * Output vector stores a single RGBA texel packed in each lane.\n- */\n-ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n-{\n- return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n-}\n-\n/**\n* @brief Store a vector, skipping masked lanes.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -1120,19 +1120,6 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\ntable[idx.lane<3>()]);\n}\n-/**\n- * @brief Return a vector of interleaved RGBA data.\n- *\n- * Input vectors have the value stored in the bottom 8 bits of each lane,\n- * with high bits set to zero.\n- *\n- * Output vector stores a single RGBA texel packed in each lane.\n- */\n-ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n-{\n- return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);\n-}\n-\n/**\n* @brief Store a vector, skipping masked lanes.\n*\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1155,23 +1155,6 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\n#endif\n}\n-/**\n- * @brief Return a vector of interleaved RGBA data.\n- *\n- * Input vectors have the value stored in the bottom 8 bits of each lane,\n- * with high bits set to zero.\n- *\n- * Output vector stores a single RGBA texel packed in each lane.\n- */\n-ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n-{\n- __m128i value = r.m;\n- value = _mm_add_epi32(value, _mm_bslli_si128(g.m, 1));\n- value = _mm_add_epi32(value, _mm_bslli_si128(b.m, 2));\n- value = _mm_add_epi32(value, _mm_bslli_si128(a.m, 3));\n- return vint4(value);\n-}\n-\n/**\n* @brief Store a vector, skipping masked lanes.\n*\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Avoid _mm_bslli_si128 for VS2015 compat
61,745
05.06.2022 12:20:03
-3,600
52fc88de5fb1faaebdd2600e91837fc9e95878eb
Cleanup MSVC \W4 for core and \W3 for CLI
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_color_quantize.cpp", "new_path": "Source/astcenc_color_quantize.cpp", "diff": "@@ -68,7 +68,7 @@ static inline int quant_color_clamp(\n* @return The encoded quantized value. These are not necessarily in order; the compressor\n* scrambles the values slightly to make hardware implementation easier.\n*/\n-static inline int quant_color(\n+static inline uint8_t quant_color(\nquant_method quant_level,\nint value\n) {\n@@ -84,7 +84,7 @@ static inline int quant_color(\n* @return The encoded quantized value. These are not necessarily in order; the compressor\n* scrambles the values slightly to make hardware implementation easier.\n*/\n-static inline int unquant_color(\n+static inline uint8_t unquant_color(\nquant_method quant_level,\nint value\n) {\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_color_unquantize.cpp", "new_path": "Source/astcenc_color_unquantize.cpp", "diff": "@@ -53,7 +53,7 @@ static ASTCENC_SIMD_INLINE vint4 unquant_color(\n* @return The encoded quantized value. These are not necessarily in order; the compressor\n* scrambles the values slightly to make hardware implementation easier.\n*/\n-static inline int unquant_color(\n+static inline uint8_t unquant_color(\nquant_method quant_level,\nint value\n) {\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -156,12 +156,12 @@ static bool realign_weights_undecimated(\n// Check if the prev or next error is better, and if so use it\nif ((error_up < error_base) && (error_up < error_down) && (uqw < 64))\n{\n- dec_weights_uquant[texel] = uqw_up;\n+ dec_weights_uquant[texel] = static_cast<uint8_t>(uqw_up);\nadjustments = true;\n}\nelse if ((error_down < error_base) && (uqw > 0))\n{\n- dec_weights_uquant[texel] = uqw_down;\n+ dec_weights_uquant[texel] = static_cast<uint8_t>(uqw_down);\nadjustments = true;\n}\n}\n@@ -1424,7 +1424,8 @@ END_OF_TESTS:\n#endif\nscb.block_type = SYM_BTYPE_CONST_U16;\n- scb.block_mode = -2;\n+ // TODO: Replace these block modes with symbolic values\n+ scb.block_mode = static_cast<uint16_t>(-2);\nvfloat4 color_f32 = clamp(0.0f, 1.0f, blk.origin_texel) * 65535.0f;\nvint4 color_u16 = float_to_int_rtn(color_f32);\nstore(color_u16, scb.constant_color);\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_entry.cpp", "new_path": "Source/astcenc_entry.cpp", "diff": "@@ -815,7 +815,7 @@ static void compress_image(\nint block_x = bsd.xdim;\nint block_y = bsd.ydim;\nint block_z = bsd.zdim;\n- blk.texel_count = block_x * block_y * block_z;\n+ blk.texel_count = static_cast<uint8_t>(block_x * block_y * block_z);\nint dim_x = image.dim_x;\nint dim_y = image.dim_y;\n@@ -1112,7 +1112,7 @@ astcenc_error astcenc_decompress_image(\n}\nimage_block blk;\n- blk.texel_count = block_x * block_y * block_z;\n+ blk.texel_count = static_cast<uint8_t>(block_x * block_y * block_z);\n// If context thread count is one then implicitly reset\nif (ctx->thread_count == 1)\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_mathlib_softfloat.cpp", "new_path": "Source/astcenc_mathlib_softfloat.cpp", "diff": "@@ -273,7 +273,7 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\nof the mantissa is set.)\n*/\np = (inp - 1) & UINT32_C(0x800000); /* zero if INF, nonzero if NaN. 
*/\n- return ((inp + vlx) >> 13) | (p >> 14);\n+ return static_cast<sf16>(((inp + vlx) >> 13) | (p >> 14));\n/*\npositive, exponent = 0, round-mode == UP; need to check whether number actually is 0.\nIf it is, then return 0, else return 1 (the smallest representable nonzero number)\n@@ -283,7 +283,7 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\n-inp will set the MSB if the input number is nonzero.\nThus (-inp) >> 31 will turn into 0 if the input number is 0 and 1 otherwise.\n*/\n- return static_cast<uint32_t>((-static_cast<int32_t>(inp))) >> 31;\n+ return static_cast<sf16>(static_cast<uint32_t>((-static_cast<int32_t>(inp))) >> 31);\n/*\nnegative, exponent = , round-mode == DOWN, need to check whether number is\n@@ -296,7 +296,7 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\nthe MSB set if it isn't. We then right-shift the value by 31 places to\nget a value that is 0 if the input is -0.0 and 1 otherwise.\n*/\n- return ((vlx - inp) >> 31) + UINT32_C(0x8000);\n+ return static_cast<sf16>(((vlx - inp) >> 31) + UINT32_C(0x8000));\n/*\nfor all other cases involving underflow/overflow, we don't need to\n@@ -330,7 +330,7 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\ncase 47:\ncase 48:\ncase 49:\n- return vlx;\n+ return static_cast<sf16>(vlx);\n/*\nfor normal numbers, 'vlx' is the difference between the FP32 value of a number and the\n@@ -349,14 +349,14 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\ncase 36:\ncase 37:\ncase 39:\n- return (inp + vlx) >> 13;\n+ return static_cast<sf16>((inp + vlx) >> 13);\n/* normal number, round-to-nearest-even. */\ncase 33:\ncase 38:\np = inp + vlx;\np += (inp >> 13) & 1;\n- return p >> 13;\n+ return static_cast<sf16>(p >> 13);\n/*\nthe various denormal cases. These are not expected to be common, so their performance is a bit\n@@ -371,22 +371,22 @@ static sf16 sf32_to_sf16(sf32 inp, roundmode rmode)\ncase 27:\n/* denormal, round towards zero. */\np = 126 - ((inp >> 23) & 0xFF);\n- return (((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000)) >> p) | vlx;\n+ return static_cast<sf16>((((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000)) >> p) | vlx);\ncase 20:\ncase 26:\n/* denormal, round away from zero. */\np = 126 - ((inp >> 23) & 0xFF);\n- return rtup_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx;\n+ return static_cast<sf16>(rtup_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx);\ncase 24:\ncase 29:\n/* denormal, round to nearest-away */\np = 126 - ((inp >> 23) & 0xFF);\n- return rtna_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx;\n+ return static_cast<sf16>(rtna_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx);\ncase 23:\ncase 28:\n/* denormal, round to nearest-even. 
*/\np = 126 - ((inp >> 23) & 0xFF);\n- return rtne_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx;\n+ return static_cast<sf16>(rtne_shift32((inp & UINT32_C(0x7FFFFF)) + UINT32_C(0x800000), p) | vlx);\n}\nreturn 0;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_partition_tables.cpp", "new_path": "Source/astcenc_partition_tables.cpp", "diff": "@@ -316,7 +316,7 @@ static bool generate_one_partition_info_entry(\n}\n// Populate the partition index\n- pi.partition_index = partition_index;\n+ pi.partition_index = static_cast<uint16_t>(partition_index);\n// Populate the coverage bitmaps for 2/3/4 partitions\nuint64_t* bitmaps { nullptr };\n@@ -429,7 +429,7 @@ static void build_partition_table_for_one_partition_count(\n{\nif (x == 0)\n{\n- bsd.partitioning_packed_index[partition_count - 2][i] = next_index;\n+ bsd.partitioning_packed_index[partition_count - 2][i] = static_cast<uint16_t>(next_index);\nbsd.partitioning_count_selected[partition_count - 1]++;\nbsd.partitioning_count_all[partition_count - 1]++;\nbuild[i] = 1;\n@@ -440,7 +440,7 @@ static void build_partition_table_for_one_partition_count(\n{\nif (x == 1)\n{\n- bsd.partitioning_packed_index[partition_count - 2][i] = next_index;\n+ bsd.partitioning_packed_index[partition_count - 2][i] = static_cast<uint16_t>(next_index);\nbsd.partitioning_count_all[partition_count - 1]++;\npartitioning_valid[partition_count - 2][next_index] = 255;\nnext_index++;\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -980,10 +980,10 @@ ASTCENC_SIMD_INLINE vfloat4 float16_to_float(vint4 a)\nreturn vfloat4(f32);\n#else\nreturn vfloat4(\n- sf16_to_float(a.lane<0>()),\n- sf16_to_float(a.lane<1>()),\n- sf16_to_float(a.lane<2>()),\n- sf16_to_float(a.lane<3>()));\n+ sf16_to_float(static_cast<uint16_t>(a.lane<0>())),\n+ sf16_to_float(static_cast<uint16_t>(a.lane<1>())),\n+ sf16_to_float(static_cast<uint16_t>(a.lane<2>())),\n+ sf16_to_float(static_cast<uint16_t>(a.lane<3>())));\n#endif\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -118,7 +118,8 @@ macro(astcenc_set_properties NAME)\n# MSVC compiler defines\n$<$<CXX_COMPILER_ID:MSVC>:/EHsc>\n- $<$<CXX_COMPILER_ID:MSVC>:/fp:strict>\n+ $<$<CXX_COMPILER_ID:MSVC>:/W4>\n+ $<$<CXX_COMPILER_ID:MSVC>:/wd\"4324\">\n# G++ and Clang++ compiler defines\n$<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wall>\n@@ -292,6 +293,10 @@ astcenc_set_properties(${ASTC_TARGET}-static)\nif(${CLI})\nastcenc_set_properties(${ASTC_TARGET})\n+ target_compile_options(${ASTC_TARGET}\n+ PRIVATE\n+ $<$<CXX_COMPILER_ID:MSVC>:/W3>)\n+\nstring(TIMESTAMP astcencoder_YEAR \"%Y\")\nconfigure_file(\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Cleanup MSVC \W4 for core and \W3 for CLI
61,745
05.06.2022 19:05:25
-3,600
fbfcb7bd35df18888111291dd1904d66e5902554
Change warnings to work with NMake
[ { "change_type": "MODIFY", "old_path": "Source/cmake_core.cmake", "new_path": "Source/cmake_core.cmake", "diff": "@@ -118,8 +118,7 @@ macro(astcenc_set_properties NAME)\n# MSVC compiler defines\n$<$<CXX_COMPILER_ID:MSVC>:/EHsc>\n- $<$<CXX_COMPILER_ID:MSVC>:/W4>\n- $<$<CXX_COMPILER_ID:MSVC>:/wd\"4324\">\n+ $<$<CXX_COMPILER_ID:MSVC>:/wd4324>\n# G++ and Clang++ compiler defines\n$<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wall>\n@@ -290,6 +289,10 @@ endif()\nastcenc_set_properties(${ASTC_TARGET}-static)\n+ target_compile_options(${ASTC_TARGET}-static\n+ PRIVATE\n+ $<$<CXX_COMPILER_ID:MSVC>:/W4>)\n+\nif(${CLI})\nastcenc_set_properties(${ASTC_TARGET})\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Change warnings to work with NMake
61,745
05.06.2022 19:21:23
-3,600
b0e3a77bec4df81bcda04d0928c02c900edd9605
Avoid _mm_bslli_si128 for VS2015 compat (method 2)
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1166,9 +1166,9 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\nASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n{\n__m128i value = r.m;\n- value = _mm_add_epi32(value, _mm_bslli_si128(g.m, 1));\n- value = _mm_add_epi32(value, _mm_bslli_si128(b.m, 2));\n- value = _mm_add_epi32(value, _mm_bslli_si128(a.m, 3));\n+ value = _mm_add_epi32(value, _mm_slli_epi32(g.m, 8));\n+ value = _mm_add_epi32(value, _mm_slli_epi32(b.m, 16));\n+ value = _mm_add_epi32(value, _mm_slli_epi32(a.m, 24));\nreturn vint4(value);\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Avoid _mm_bslli_si128 for VS2015 compat (method 2)
61,745
05.06.2022 20:12:26
-3,600
5fe801553a2d385c01ca54035653478cfab87383
Fix store_lanes_masked() fallback
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_avx2_8.h", "new_path": "Source/astcenc_vecmathlib_avx2_8.h", "diff": "@@ -1167,6 +1167,17 @@ ASTCENC_SIMD_INLINE void print(vint8 a)\nv[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);\n}\n+/**\n+ * @brief Debug function to print a vector of ints.\n+ */\n+ASTCENC_SIMD_INLINE void printx(vint8 a)\n+{\n+ alignas(ASTCENC_VECALIGN) int v[8];\n+ storea(a, v);\n+ printf(\"v8_i32:\\n %08x %08x %08x %08x %08x %08x %08x %08x\\n\",\n+ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);\n+}\n+\n/**\n* @brief Debug function to print a vector of floats.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_common_4.h", "new_path": "Source/astcenc_vecmathlib_common_4.h", "diff": "@@ -372,6 +372,17 @@ ASTCENC_SIMD_INLINE void print(vint4 a)\nv[0], v[1], v[2], v[3]);\n}\n+/**\n+ * @brief Debug function to print a vector of ints.\n+ */\n+ASTCENC_SIMD_INLINE void printx(vint4 a)\n+{\n+ alignas(16) int v[4];\n+ storea(a, v);\n+ printf(\"v4_i32:\\n %08x %08x %08x %08x\\n\",\n+ v[0], v[1], v[2], v[3]);\n+}\n+\n/**\n* @brief Debug function to print a vector of floats.\n*/\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_neon_4.h", "new_path": "Source/astcenc_vecmathlib_neon_4.h", "diff": "@@ -1035,7 +1035,6 @@ ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)\nelse if(mask.m[0])\n{\nbase[0] = data.lane<0>();\n- base[1] = data.lane<1>();\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_none_4.h", "new_path": "Source/astcenc_vecmathlib_none_4.h", "diff": "@@ -1158,7 +1158,6 @@ ASTCENC_SIMD_INLINE void store_lanes_masked(int* base, vint4 data, vmask4 mask)\nelse if(mask.m[0])\n{\nbase[0] = data.lane<0>();\n- base[1] = data.lane<1>();\n}\n}\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Fix store_lanes_masked() fallback
61,745
05.06.2022 20:16:11
-3,600
22e142cf8a8e1fa46b3f83956c369d8a2a1cd47f
Workaround XCode LLVM compiler crash
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_vecmathlib_sse_4.h", "new_path": "Source/astcenc_vecmathlib_sse_4.h", "diff": "@@ -1165,11 +1165,21 @@ ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3\n*/\nASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)\n{\n+// Workaround an XCode compiler internal fault; note is slower than slli_epi32\n+// so we should revert this when we get the opportunity\n+#if __APPLE__\n+ __m128i value = r.m;\n+ value = _mm_add_epi32(value, _mm_bslli_si128(g.m, 1));\n+ value = _mm_add_epi32(value, _mm_bslli_si128(b.m, 2));\n+ value = _mm_add_epi32(value, _mm_bslli_si128(a.m, 3));\n+ return vint4(value);\n+#else\n__m128i value = r.m;\nvalue = _mm_add_epi32(value, _mm_slli_epi32(g.m, 8));\nvalue = _mm_add_epi32(value, _mm_slli_epi32(b.m, 16));\nvalue = _mm_add_epi32(value, _mm_slli_epi32(a.m, 24));\nreturn vint4(value);\n+#endif\n}\n/**\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Workaround XCode LLVM compiler crash
61,745
05.06.2022 21:38:30
-3,600
a9568c1ac52f7584d22c376775f9381e944fea22
Update diagnostic trace
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_compress_symbolic.cpp", "new_path": "Source/astcenc_compress_symbolic.cpp", "diff": "@@ -527,7 +527,7 @@ static float compress_symbolic_block_for_partition_1plane(\ntrace_add_data(\"weight_x\", di.weight_x);\ntrace_add_data(\"weight_y\", di.weight_y);\ntrace_add_data(\"weight_z\", di.weight_z);\n- trace_add_data(\"weight_quant\", weight_quant_mode);\n+ trace_add_data(\"weight_quant\", qw_bm.quant_mode);\n// Recompute the ideal color endpoints before storing them\nvfloat4 rgbs_colors[BLOCK_MAX_PARTITIONS];\n@@ -910,7 +910,7 @@ static float compress_symbolic_block_for_partition_2planes(\ntrace_add_data(\"weight_x\", di.weight_x);\ntrace_add_data(\"weight_y\", di.weight_y);\ntrace_add_data(\"weight_z\", di.weight_z);\n- trace_add_data(\"weight_quant\", weight_quant_mode);\n+ trace_add_data(\"weight_quant\", qw_bm.quant_mode);\nvfloat4 rgbs_color;\nvfloat4 rgbo_color;\n@@ -1201,13 +1201,13 @@ void compress_block(\n#if defined(ASTCENC_DIAGNOSTICS)\n// Do this early in diagnostic builds so we can dump uniform metrics\n// for every block. Do it later in release builds to avoid redundant work!\n- float error_weight_sum = hadd_s(blk.channel_weight) * bsd->texel_count;\n+ float error_weight_sum = hadd_s(blk.channel_weight) * bsd.texel_count;\nfloat error_threshold = ctx.config.tune_db_limit\n* error_weight_sum\n* block_is_l_scale\n* block_is_la_scale;\n- lowest_correl = prepare_block_statistics(bsd->texel_count, blk);\n+ lowest_correl = prepare_block_statistics(bsd.texel_count, blk);\ntrace_add_data(\"lowest_correl\", lowest_correl);\ntrace_add_data(\"tune_error_threshold\", error_threshold);\n#endif\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_diagnostic_trace.cpp", "new_path": "Source/astcenc_diagnostic_trace.cpp", "diff": "// SPDX-License-Identifier: Apache-2.0\n// ----------------------------------------------------------------------------\n-// Copyright 2021 Arm Limited\n+// Copyright 2021-2022 Arm Limited\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n// use this file except in compliance with the License. You may obtain a copy\nstatic TraceLog* g_TraceLog = nullptr;\n/** @brief The JSON indentation level. */\n-static const int g_trace_indent = 2;\n+static const size_t g_trace_indent = 2;\nTraceLog::TraceLog(\nconst char* file_name):\n@@ -55,7 +55,7 @@ TraceNode* TraceLog::get_current_leaf()\n}\n/* See header for documentation. 
*/\n-int TraceLog::get_depth()\n+size_t TraceLog::get_depth()\n{\nreturn m_stack.size();\n}\n@@ -87,7 +87,7 @@ TraceNode::TraceNode(\n// Generate the node\nTraceNode* parent = g_TraceLog->get_current_leaf();\n- int depth = g_TraceLog->get_depth();\n+ size_t depth = g_TraceLog->get_depth();\ng_TraceLog->m_stack.push_back(this);\nbool comma = parent && parent->m_attrib_count;\n@@ -108,8 +108,8 @@ TraceNode::TraceNode(\nout << '\\n';\n}\n- int out_indent = (depth * 2) * g_trace_indent;\n- int in_indent = (depth * 2 + 1) * g_trace_indent;\n+ size_t out_indent = (depth * 2) * g_trace_indent;\n+ size_t in_indent = (depth * 2 + 1) * g_trace_indent;\nstd::string out_indents(\"\");\nif (out_indent)\n@@ -131,8 +131,8 @@ void TraceNode::add_attrib(\n) {\n(void)type;\n- int depth = g_TraceLog->get_depth();\n- int indent = (depth * 2) * g_trace_indent;\n+ size_t depth = g_TraceLog->get_depth();\n+ size_t indent = (depth * 2) * g_trace_indent;\nauto& out = g_TraceLog->m_file;\nbool comma = m_attrib_count;\nm_attrib_count++;\n@@ -154,9 +154,9 @@ TraceNode::~TraceNode()\ng_TraceLog->m_stack.pop_back();\nauto& out = g_TraceLog->m_file;\n- int depth = g_TraceLog->get_depth();\n- int out_indent = (depth * 2) * g_trace_indent;\n- int in_indent = (depth * 2 + 1) * g_trace_indent;\n+ size_t depth = g_TraceLog->get_depth();\n+ size_t out_indent = (depth * 2) * g_trace_indent;\n+ size_t in_indent = (depth * 2 + 1) * g_trace_indent;\nstd::string out_indents(\"\");\nif (out_indent)\n" }, { "change_type": "MODIFY", "old_path": "Source/astcenc_diagnostic_trace.h", "new_path": "Source/astcenc_diagnostic_trace.h", "diff": "// SPDX-License-Identifier: Apache-2.0\n// ----------------------------------------------------------------------------\n-// Copyright 2021 Arm Limited\n+// Copyright 2021-2022 Arm Limited\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n// use this file except in compliance with the License. You may obtain a copy\n@@ -148,7 +148,7 @@ public:\n*\n* @return The current leaf node stack depth.\n*/\n- int get_depth();\n+ size_t get_depth();\n/**\n* @brief The file stream to write to.\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Update diagnostic trace
61,745
05.06.2022 21:59:22
-3,600
9ca2fe635f511a5988d692a320d61eab0cc927ce
Init block_mode before decimation_mode
[ { "change_type": "MODIFY", "old_path": "Source/astcenc_block_sizes.cpp", "new_path": "Source/astcenc_block_sizes.cpp", "diff": "@@ -949,6 +949,13 @@ static void construct_block_size_descriptor_2d(\n}\nauto& bm = bsd.block_modes[packed_bm_idx];\n+\n+ bm.decimation_mode = static_cast<uint8_t>(decimation_mode);\n+ bm.quant_mode = static_cast<uint8_t>(quant_mode);\n+ bm.is_dual_plane = static_cast<uint8_t>(is_dual_plane);\n+ bm.weight_bits = static_cast<uint8_t>(weight_bits);\n+ bm.mode_index = static_cast<uint16_t>(i);\n+\nauto& dm = bsd.decimation_modes[decimation_mode];\nif (is_dual_plane)\n@@ -960,12 +967,6 @@ static void construct_block_size_descriptor_2d(\ndm.set_ref_1_plane(bm.get_weight_quant_mode());\n}\n- bm.decimation_mode = static_cast<uint8_t>(decimation_mode);\n- bm.quant_mode = static_cast<uint8_t>(quant_mode);\n- bm.is_dual_plane = static_cast<uint8_t>(is_dual_plane);\n- bm.weight_bits = static_cast<uint8_t>(weight_bits);\n- bm.mode_index = static_cast<uint16_t>(i);\n-\nbsd.block_mode_packed_index[i] = static_cast<uint16_t>(packed_bm_idx);\npacked_bm_idx++;\n" } ]
C
Apache License 2.0
arm-software/astc-encoder
Init block_mode before decimation_mode