//===-- ConvertUTFWrapper.cpp - Wrap ConvertUTF.h with clang data types ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
|
|
#include "llvm/ADT/StringRef.h"
|
|
#include "llvm/Support/ConvertUTF.h"
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
#include "llvm/Support/SwapByteOrder.h"
|
|
#include <string>
|
|
#include <vector>
|
|
|
|
namespace llvm {

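// Converts a UTF-8 string into a caller-provided wide-character buffer whose
// element width is WideCharWidth bytes (1, 2, or 4). On success, ResultPtr is
// advanced past the last code unit written; on failure, ErrorPtr points at the
// first ill-formed byte of Source. The conversion assumes the output buffer
// has room for Source.size() elements.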
bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
                       char *&ResultPtr, const UTF8 *&ErrorPtr) {
  assert(WideCharWidth == 1 || WideCharWidth == 2 || WideCharWidth == 4);
  ConversionResult result = conversionOK;
  // Copy the character span over.
  if (WideCharWidth == 1) {
    const UTF8 *Pos = reinterpret_cast<const UTF8*>(Source.begin());
    if (!isLegalUTF8String(&Pos, reinterpret_cast<const UTF8*>(Source.end()))) {
      result = sourceIllegal;
      ErrorPtr = Pos;
    } else {
      memcpy(ResultPtr, Source.data(), Source.size());
      ResultPtr += Source.size();
    }
  } else if (WideCharWidth == 2) {
    const UTF8 *sourceStart = (const UTF8*)Source.data();
    // FIXME: Make the type of the result buffer correct instead of
    // using reinterpret_cast.
    UTF16 *targetStart = reinterpret_cast<UTF16*>(ResultPtr);
    ConversionFlags flags = strictConversion;
    result = ConvertUTF8toUTF16(
        &sourceStart, sourceStart + Source.size(),
        &targetStart, targetStart + Source.size(), flags);
    if (result == conversionOK)
      ResultPtr = reinterpret_cast<char*>(targetStart);
    else
      ErrorPtr = sourceStart;
  } else if (WideCharWidth == 4) {
    const UTF8 *sourceStart = (const UTF8*)Source.data();
    // FIXME: Make the type of the result buffer correct instead of
    // using reinterpret_cast.
    UTF32 *targetStart = reinterpret_cast<UTF32*>(ResultPtr);
    ConversionFlags flags = strictConversion;
    result = ConvertUTF8toUTF32(
        &sourceStart, sourceStart + Source.size(),
        &targetStart, targetStart + Source.size(), flags);
    if (result == conversionOK)
      ResultPtr = reinterpret_cast<char*>(targetStart);
    else
      ErrorPtr = sourceStart;
  }
  assert((result != targetExhausted)
         && "ConvertUTF8toUTFXX exhausted target buffer");
  return result == conversionOK;
}

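// Encodes the single Unicode code point Source as UTF-8 at ResultPtr, which
// must have room for up to four bytes, and advances ResultPtr past the bytes
// written. Returns false if Source is not a valid code point.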
bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr) {
  const UTF32 *SourceStart = &Source;
  const UTF32 *SourceEnd = SourceStart + 1;
  UTF8 *TargetStart = reinterpret_cast<UTF8 *>(ResultPtr);
  UTF8 *TargetEnd = TargetStart + 4;
  ConversionResult CR = ConvertUTF32toUTF8(&SourceStart, SourceEnd,
                                           &TargetStart, TargetEnd,
                                           strictConversion);
  if (CR != conversionOK)
    return false;

  ResultPtr = reinterpret_cast<char*>(TargetStart);
  return true;
}

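// Returns true if S starts with a UTF-16 byte order mark in either byte order
// (FF FE or FE FF).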
bool hasUTF16ByteOrderMark(ArrayRef<char> S) {
  return (S.size() >= 2 &&
          ((S[0] == '\xff' && S[1] == '\xfe') ||
           (S[0] == '\xfe' && S[1] == '\xff')));
}

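// Converts raw bytes holding UTF-16 text into a UTF-8 std::string. A leading
// byte order mark, if present, selects the byte order and is not copied to
// the output. Returns false (and clears Out) on malformed input.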
bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out) {
  assert(Out.empty());

  // Error out on an uneven byte count.
  if (SrcBytes.size() % 2)
    return false;

  // Avoid OOB by returning early on empty input.
  if (SrcBytes.empty())
    return true;

  const UTF16 *Src = reinterpret_cast<const UTF16 *>(SrcBytes.begin());
  const UTF16 *SrcEnd = reinterpret_cast<const UTF16 *>(SrcBytes.end());

  // Byteswap if necessary.
  std::vector<UTF16> ByteSwapped;
  if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_SWAPPED) {
    ByteSwapped.insert(ByteSwapped.end(), Src, SrcEnd);
    for (unsigned I = 0, E = ByteSwapped.size(); I != E; ++I)
      ByteSwapped[I] = llvm::sys::SwapByteOrder_16(ByteSwapped[I]);
    Src = &ByteSwapped[0];
    SrcEnd = &ByteSwapped[ByteSwapped.size() - 1] + 1;
  }

  // Skip the BOM for conversion.
  if (Src[0] == UNI_UTF16_BYTE_ORDER_MARK_NATIVE)
    Src++;

  // Just allocate enough space up front. We'll shrink it later. Allocate
  // enough that we can fit a null terminator without reallocating.
  Out.resize(SrcBytes.size() * UNI_MAX_UTF8_BYTES_PER_CODE_POINT + 1);
  UTF8 *Dst = reinterpret_cast<UTF8 *>(&Out[0]);
  UTF8 *DstEnd = Dst + Out.size();

  ConversionResult CR =
      ConvertUTF16toUTF8(&Src, SrcEnd, &Dst, DstEnd, strictConversion);
  assert(CR != targetExhausted);

  if (CR != conversionOK) {
    Out.clear();
    return false;
  }

  Out.resize(reinterpret_cast<char *>(Dst) - &Out[0]);
  Out.push_back(0);
  Out.pop_back();
  return true;
}

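// Convenience overload: treats Src as native-endian UTF-16 code units and
// forwards to the byte-oriented overload above.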
bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out) {
  return convertUTF16ToUTF8String(
      llvm::ArrayRef<char>(reinterpret_cast<const char *>(Src.data()),
                           Src.size() * sizeof(UTF16)),
      Out);
}

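// Converts a UTF-8 string into native-endian UTF-16 code units. A null
// terminator is kept just past the logical end of DstUTF16, so
// DstUTF16.data() is usable as a null-terminated string.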
bool convertUTF8ToUTF16String(StringRef SrcUTF8,
                              SmallVectorImpl<UTF16> &DstUTF16) {
  assert(DstUTF16.empty());

  // Avoid OOB by returning early on empty input.
  if (SrcUTF8.empty()) {
    DstUTF16.push_back(0);
    DstUTF16.pop_back();
    return true;
  }

  const UTF8 *Src = reinterpret_cast<const UTF8 *>(SrcUTF8.begin());
  const UTF8 *SrcEnd = reinterpret_cast<const UTF8 *>(SrcUTF8.end());

  // Allocate the same number of UTF-16 code units as UTF-8 code units.
  // Encoding as UTF-16 always requires the same number of code units or fewer
  // than the UTF-8 encoding. Allocate one extra element for the null
  // terminator though, so that someone calling DstUTF16.data() gets a null
  // terminated string. We resize down later so we don't have to worry that
  // this over-allocates.
  DstUTF16.resize(SrcUTF8.size() + 1);
  UTF16 *Dst = &DstUTF16[0];
  UTF16 *DstEnd = Dst + DstUTF16.size();

  ConversionResult CR =
      ConvertUTF8toUTF16(&Src, SrcEnd, &Dst, DstEnd, strictConversion);
  assert(CR != targetExhausted);

  if (CR != conversionOK) {
    DstUTF16.clear();
    return false;
  }

  DstUTF16.resize(Dst - &DstUTF16[0]);
  DstUTF16.push_back(0);
  DstUTF16.pop_back();
  return true;
}

static_assert(sizeof(wchar_t) == 1 || sizeof(wchar_t) == 2 ||
                  sizeof(wchar_t) == 4,
              "Expected wchar_t to be 1, 2, or 4 bytes");

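// Shared implementation for the ConvertUTF8toWide overloads below; TResult is
// a contiguous container of wchar_t such as std::wstring.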
template <typename TResult>
static inline bool ConvertUTF8toWideInternal(llvm::StringRef Source,
                                             TResult &Result) {
  // Even in the case of UTF-16, the number of bytes in a UTF-8 string is
  // at least as large as the number of elements in the resulting wide
  // string, because surrogate pairs take at least 4 bytes in UTF-8.
  Result.resize(Source.size() + 1);
  char *ResultPtr = reinterpret_cast<char *>(&Result[0]);
  const UTF8 *ErrorPtr;
  if (!ConvertUTF8toWide(sizeof(wchar_t), Source, ResultPtr, ErrorPtr)) {
    Result.clear();
    return false;
  }
  Result.resize(reinterpret_cast<wchar_t *>(ResultPtr) - &Result[0]);
  return true;
}

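// Converts a UTF-8 StringRef to a std::wstring, whatever width wchar_t has on
// this platform.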
bool ConvertUTF8toWide(llvm::StringRef Source, std::wstring &Result) {
  return ConvertUTF8toWideInternal(Source, Result);
}

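// Null-pointer-tolerant overload: a null Source yields an empty Result and
// succeeds.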
bool ConvertUTF8toWide(const char *Source, std::wstring &Result) {
  if (!Source) {
    Result.clear();
    return true;
  }
  return ConvertUTF8toWide(llvm::StringRef(Source), Result);
}

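// Converts a wide string back to UTF-8, dispatching on sizeof(wchar_t); on
// any given platform only one branch is ever taken.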
bool convertWideToUTF8(const std::wstring &Source, std::string &Result) {
  if (sizeof(wchar_t) == 1) {
    const UTF8 *Start = reinterpret_cast<const UTF8 *>(Source.data());
    const UTF8 *End =
        reinterpret_cast<const UTF8 *>(Source.data() + Source.size());
    if (!isLegalUTF8String(&Start, End))
      return false;
    Result.resize(Source.size());
    memcpy(&Result[0], Source.data(), Source.size());
    return true;
  } else if (sizeof(wchar_t) == 2) {
    return convertUTF16ToUTF8String(
        llvm::ArrayRef<UTF16>(reinterpret_cast<const UTF16 *>(Source.data()),
                              Source.size()),
        Result);
  } else if (sizeof(wchar_t) == 4) {
    const UTF32 *Start = reinterpret_cast<const UTF32 *>(Source.data());
    const UTF32 *End =
        reinterpret_cast<const UTF32 *>(Source.data() + Source.size());
    Result.resize(UNI_MAX_UTF8_BYTES_PER_CODE_POINT * Source.size());
    UTF8 *ResultPtr = reinterpret_cast<UTF8 *>(&Result[0]);
    UTF8 *ResultEnd = reinterpret_cast<UTF8 *>(&Result[0] + Result.size());
    if (ConvertUTF32toUTF8(&Start, End, &ResultPtr, ResultEnd,
                           strictConversion) == conversionOK) {
      Result.resize(reinterpret_cast<char *>(ResultPtr) - &Result[0]);
      return true;
    } else {
      Result.clear();
      return false;
    }
  } else {
    llvm_unreachable(
        "Control should never reach this point; see static_assert further up");
  }
}

} // end namespace llvm