From 883dbce74a3d6d66fd0f57f383284aef378db47c Mon Sep 17 00:00:00 2001
From: Jeffrey Walton
Date: Thu, 3 Oct 2019 20:44:33 -0400
Subject: [PATCH] Remove double semicolons after sed'ing defines

Also see https://github.com/weidai11/cryptopp/issues/889
---
 adler32.cpp  |  2 +-
 asn.cpp      |  2 +-
 cham.cpp     |  4 ++--
 filters.cpp  |  4 ++--
 lea.cpp      |  2 +-
 lea_simd.cpp |  4 ++--
 mersenne.h   |  2 +-
 rijndael.cpp |  2 +-
 sha_simd.cpp | 32 ++++++++++++++++----------------
 simon.cpp    | 12 ++++++------
 speck.cpp    | 12 ++++++------
 validat2.cpp |  2 +-
 12 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/adler32.cpp b/adler32.cpp
index 75c2e659..2cfb1f44 100644
--- a/adler32.cpp
+++ b/adler32.cpp
@@ -72,7 +72,7 @@ void Adler32::TruncatedFinal(byte *hash, size_t size)
         hash[0] = byte(m_s2 >> 8);
         // fall through
     case 0:
-        ;;
+        ;
         // fall through
     }
diff --git a/asn.cpp b/asn.cpp
index bf30667b..61880b3b 100644
--- a/asn.cpp
+++ b/asn.cpp
@@ -455,7 +455,7 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length)
         // fall through
     case TAIL:
     case ALL_DONE:
-    default: ;;
+    default: ;
     }

     if (m_state == IDENTIFIER && m_level == 0)
diff --git a/cham.cpp b/cham.cpp
index 2b92942f..ace0f07a 100644
--- a/cham.cpp
+++ b/cham.cpp
@@ -273,7 +273,7 @@ void CHAM128::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         break;
     }
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     PutBlock oblock(xorBlock, outBlock);
@@ -329,7 +329,7 @@ void CHAM128::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         break;
     }
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     PutBlock oblock(xorBlock, outBlock);
diff --git a/filters.cpp b/filters.cpp
index e4cb1d4a..4fff2a2f 100644
--- a/filters.cpp
+++ b/filters.cpp
@@ -87,7 +87,7 @@ bool Filter::Flush(bool hardFlush, int propagation, bool blocking)
         if (OutputFlush(1, hardFlush, propagation, blocking))
             return true;
         // fall through
-    default: ;;
+    default: ;
     }
     return false;
 }
@@ -104,7 +104,7 @@ bool Filter::MessageSeriesEnd(int propagation, bool blocking)
         if (ShouldPropagateMessageSeriesEnd() && OutputMessageSeriesEnd(1, propagation, blocking))
             return true;
         // fall through
-    default: ;;
+    default: ;
     }
     return false;
 }
diff --git a/lea.cpp b/lea.cpp
index 4440189f..4361d513 100644
--- a/lea.cpp
+++ b/lea.cpp
@@ -616,7 +616,7 @@ void LEA::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength, con
         break;
     }
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }
 }
diff --git a/lea_simd.cpp b/lea_simd.cpp
index ee2a5697..85ff6a3a 100644
--- a/lea_simd.cpp
+++ b/lea_simd.cpp
@@ -181,7 +181,7 @@ template
 inline uint32x4_t UnpackNEON(const uint32x4_t& a, const uint32x4_t& b, const uint32x4_t& c, const uint32x4_t& d)
 {
     // Should not be instantiated
-    CRYPTOPP_ASSERT(0);;
+    CRYPTOPP_ASSERT(0);
     return vmovq_n_u32(0);
 }
@@ -221,7 +221,7 @@ template
 inline uint32x4_t UnpackNEON(const uint32x4_t& v)
 {
     // Should not be instantiated
-    CRYPTOPP_ASSERT(0);;
+    CRYPTOPP_ASSERT(0);
     return vmovq_n_u32(0);
 }
diff --git a/mersenne.h b/mersenne.h
index 4cebdd9d..9bd97341 100644
--- a/mersenne.h
+++ b/mersenne.h
@@ -92,7 +92,7 @@ public:
             case 2: output[1] = CRYPTOPP_GET_BYTE_AS_BYTE(temp, 2); /* fall through */
             case 1: output[0] = CRYPTOPP_GET_BYTE_AS_BYTE(temp, 3); break;
-            default: CRYPTOPP_ASSERT(0); ;;
+            default: CRYPTOPP_ASSERT(0); ;
         }

         // Wipe temp
diff --git a/rijndael.cpp b/rijndael.cpp
index 258fb952..1c3bc295 100644
--- a/rijndael.cpp
+++ b/rijndael.cpp
@@ -299,7 +299,7 @@ void Rijndael::Base::FillDecTable()
         word32 y = word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
         Td[i] = word64(y | fb(x))<<32 | y | x;
 #else
-        word32 y = fb(x) | word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;;
+        word32 y = fb(x) | word32(fd(x))<<8 | word32(f9(x))<<16 | word32(fe(x))<<24;
         for (int j=0; j<4; j++)
         {
             Td[i+j*256] = y;
diff --git a/sha_simd.cpp b/sha_simd.cpp
index ec345e39..6b6852f0 100644
--- a/sha_simd.cpp
+++ b/sha_simd.cpp
@@ -862,7 +862,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x04]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);;
+        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

         // Rounds 4-7
         MSG1 = vsha256su0q_u32(MSG1, MSG2);
@@ -870,7 +870,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x08]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);;
+        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

         // Rounds 8-11
         MSG2 = vsha256su0q_u32(MSG2, MSG3);
@@ -878,7 +878,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x0c]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);;
+        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

         // Rounds 12-15
         MSG3 = vsha256su0q_u32(MSG3, MSG0);
@@ -886,7 +886,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x10]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);;
+        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

         // Rounds 16-19
         MSG0 = vsha256su0q_u32(MSG0, MSG1);
@@ -894,7 +894,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x14]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);;
+        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

         // Rounds 20-23
         MSG1 = vsha256su0q_u32(MSG1, MSG2);
@@ -902,7 +902,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x18]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);;
+        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

         // Rounds 24-27
         MSG2 = vsha256su0q_u32(MSG2, MSG3);
@@ -910,7 +910,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x1c]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);;
+        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

         // Rounds 28-31
         MSG3 = vsha256su0q_u32(MSG3, MSG0);
@@ -918,7 +918,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x20]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);;
+        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

         // Rounds 32-35
         MSG0 = vsha256su0q_u32(MSG0, MSG1);
@@ -926,7 +926,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x24]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);;
+        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

         // Rounds 36-39
         MSG1 = vsha256su0q_u32(MSG1, MSG2);
@@ -934,7 +934,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x28]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);;
+        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

         // Rounds 40-43
         MSG2 = vsha256su0q_u32(MSG2, MSG3);
@@ -942,7 +942,7 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x2c]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
-        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);;
+        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

         // Rounds 44-47
         MSG3 = vsha256su0q_u32(MSG3, MSG0);
@@ -950,30 +950,30 @@ void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t l
         TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x30]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
         STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
-        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);;
+        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

         // Rounds 48-51
         TMP2 = STATE0;
         TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x34]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
-        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);;
+        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

         // Rounds 52-55
         TMP2 = STATE0;
         TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x38]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
-        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);;
+        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

         // Rounds 56-59
         TMP2 = STATE0;
         TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x3c]));
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
-        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);;
+        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

         // Rounds 60-63
         TMP2 = STATE0;
         STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
-        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);;
+        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

         // Add back to state
         STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
diff --git a/simon.cpp b/simon.cpp
index d508d974..91f8bf1d 100644
--- a/simon.cpp
+++ b/simon.cpp
@@ -294,7 +294,7 @@ void SIMON64::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength,
         SIMON64_ExpandKey_4W(m_rkeys, m_wspace);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Altivec loads the current subkey as a 16-byte vector
@@ -319,7 +319,7 @@ void SIMON64::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         SIMON_Encrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -342,7 +342,7 @@ void SIMON64::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         SIMON_Decrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -403,7 +403,7 @@ void SIMON128::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength
         SIMON128_ExpandKey_4W(m_rkeys, m_wspace);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }
 }
@@ -425,7 +425,7 @@ void SIMON128::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock
         SIMON_Encrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -451,7 +451,7 @@ void SIMON128::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock
         SIMON_Decrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
diff --git a/speck.cpp b/speck.cpp
index 151bac78..9cc2b0c0 100644
--- a/speck.cpp
+++ b/speck.cpp
@@ -274,7 +274,7 @@ void SPECK64::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength,
         SPECK_ExpandKey_4W(m_rkeys, m_wspace);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Altivec loads the current subkey as a 16-byte vector
@@ -299,7 +299,7 @@ void SPECK64::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         SPECK_Encrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -322,7 +322,7 @@ void SPECK64::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock,
         SPECK_Decrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -383,7 +383,7 @@ void SPECK128::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength
         SPECK_ExpandKey_4W(m_rkeys, m_wspace);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }
 }
@@ -405,7 +405,7 @@ void SPECK128::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock
         SPECK_Encrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
@@ -431,7 +431,7 @@ void SPECK128::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock
         SPECK_Decrypt(m_wspace+2, m_wspace+0, m_rkeys);
         break;
     default:
-        CRYPTOPP_ASSERT(0);;
+        CRYPTOPP_ASSERT(0);
     }

     // Do the endian gyrations from the paper and align pointers
diff --git a/validat2.cpp b/validat2.cpp
index dfb52475..d079eb26 100644
--- a/validat2.cpp
+++ b/validat2.cpp
@@ -998,7 +998,7 @@ bool TestIntegerOps()
         case 11:
             a = (m<<256)+1; break;
         default:
-            ;;
+            ;
         }

         Integer x = a.InverseMod(m);
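
For context on why every hunk above is behavior-neutral: in C++ the second semicolon in ";;" is
just an empty statement, so dropping it cannot change what the code does; it only removes the
redundant null statement left behind by the sed pass over the defines, which some compilers flag
(for example, Clang's -Wextra-semi-stmt). A minimal, hypothetical sketch of the pattern — the
function classify and its body are made up for illustration and are not taken from the Crypto++
sources:

    #include <cassert>

    int classify(int x)
    {
        int r = 0;
        switch (x)
        {
        case 0:
            r = 1;;      // pre-cleanup form: the trailing ';' is a redundant empty statement
            break;
        default:
            assert(0);   // post-cleanup form: a single terminating semicolon
        }
        return r;
    }

Both forms compile to the same code, which is why the patch is a pure warning cleanup.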