llvm-project/compiler-rt/lib/scudo/scudo_allocator_secondary.h
Kostya Kortchinsky 1148dc5274 [scudo] 32-bit and hardware agnostic support
Summary:
This update introduces i386 support for the Scudo Hardened Allocator, and
offers software alternatives for functions that used to require
hardware-specific instruction sets. This should make porting to new
architectures easier.

Among the changes:
- The chunk header has been changed to accommodate the size limitations
  encountered on 32-bit architectures. Everything now fits in 64 bits. This
  was achieved by storing the number of unused bytes in an allocation rather
  than the size itself, as one can be deduced from the other with the help
  of the GetActuallyAllocatedSize function. As it turns out, this header can
  be used for both 64-bit and 32-bit, so we dropped the requirement for
  128-bit compare-and-exchange instruction support (cmpxchg16b). A sketch of
  the idea follows below.
- Add 32-bit support for the checksum and the PRNG functions: if the SSE 4.2
  instruction set is supported, use the 32-bit CRC32 instruction; in
  XorShift128, use a 32-bit state instead of a 64-bit one (see the sketch
  after this list).
- Add software support for CRC32: if SSE 4.2 is not supported, fall back to a
  software implementation.
- Modify tests that were not 32-bit compliant, and expand them to cover more
  allocation and alignment sizes. The random shuffle test has been deactivated
  for linux-i386 & linux-i686 as the 32-bit sanitizer allocator doesn't
  currently randomize chunks.
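
As an illustration of the header change, here is a minimal sketch of a packed
64-bit header. The field names and widths below are hypothetical and do not
match the actual Scudo layout; the point is that once UnusedBytes replaces the
full size, the whole header fits in a single 64-bit word, so a plain 64-bit
compare-and-exchange is enough and cmpxchg16b is no longer needed:

    // Hypothetical packed header; illustrative only, not the real Scudo layout.
    struct ExampleUnpackedHeader {
      u64 Checksum    : 16;
      u64 UnusedBytes : 20;  // Trailing bytes not used by the allocation.
      u64 State       : 2;
      u64 Offset      : 16;  // Offset of the user pointer from the backend chunk.
      u64 Salt        : 10;
    };
    typedef atomic_uint64_t ExamplePackedHeader;  // 64-bit CAS suffices.
    COMPILER_CHECK(sizeof(ExampleUnpackedHeader) == sizeof(u64));
    // The requested size is not stored; it is recovered on demand as:
    //   Size = GetActuallyAllocatedSize(Ptr) - Header.UnusedBytes;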
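
Similarly, here is a rough sketch of the two hardware-independent paths
mentioned above. The helpers (softwareCRC32, xorshift128Next) are made up for
illustration and are not the actual Scudo functions:

    #include <stdint.h>

    // Bit-by-bit CRC-32C (Castagnoli polynomial, reflected), processing a
    // 32-bit value one byte at a time; a fallback of this shape can be used
    // when the SSE 4.2 crc32 instruction is not available.
    uint32_t softwareCRC32(uint32_t Crc, uint32_t Data) {
      for (int i = 0; i < 4; i++) {
        Crc ^= (Data >> (8 * i)) & 0xff;
        for (int j = 0; j < 8; j++)
          Crc = (Crc >> 1) ^ ((Crc & 1) ? 0x82f63b78u : 0u);
      }
      return Crc;
    }

    // XorShift128 built on four 32-bit words of state (Marsaglia's xorshift),
    // so it only needs 32-bit operations and runs unchanged on i386. The
    // state must be seeded to something other than all zeroes.
    struct XorShift128State { uint32_t X, Y, Z, W; };

    uint32_t xorshift128Next(XorShift128State *S) {
      uint32_t T = S->X ^ (S->X << 11);
      S->X = S->Y;
      S->Y = S->Z;
      S->Z = S->W;
      S->W = S->W ^ (S->W >> 19) ^ (T ^ (T >> 8));
      return S->W;
    }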

Reviewers: alekseyshl, kcc

Subscribers: filcab, llvm-commits, tberghammer, danalbert, srhines, mgorny, modocache

Differential Revision: https://reviews.llvm.org/D26358

llvm-svn: 288255
2016-11-30 17:32:20 +00:00

//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif

class ScudoLargeMmapAllocator {
 public:

  void Init(bool AllocatorMayReturnNull) {
    PageSize = GetPageSizeCached();
    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
  }
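
  // The mapping created below is laid out as follows; both headers sit
  // immediately before the user pointer that gets returned:
  //   MapBeg                                                             MapEnd
  //   | guard page | ... SecondaryHeader | ChunkHeader | user chunk ... | guard page |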
  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr HeadersSize = sizeof(SecondaryHeader) + AlignedChunkHeaderSize;
    uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    MapSize += 2 * PageSize;
    // Adding an extra Alignment is not required, it was done by the frontend.
    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
    if (MapBeg == ~static_cast<uptr>(0))
      return ReturnNullOrDieOnOOM();
    // A page-aligned pointer is assumed after that, so check it now.
    CHECK(IsAligned(MapBeg, PageSize));
    uptr MapEnd = MapBeg + MapSize;
    uptr UserBeg = MapBeg + PageSize + HeadersSize;
    // In the event of larger alignments, we will attempt to fit the mmap area
    // better and unmap extraneous memory. This will also ensure that the
    // offset field of the header stays small (it will always be 0).
    if (Alignment > MinAlignment) {
      if (UserBeg & (Alignment - 1))
        UserBeg += Alignment - (UserBeg & (Alignment - 1));
      CHECK_GE(UserBeg, MapBeg);
      uptr NewMapBeg = UserBeg - HeadersSize;
      NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
      CHECK_GE(NewMapBeg, MapBeg);
      uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize);
      uptr NewMapEnd = NewMapBeg + NewMapSize;
      CHECK_LE(NewMapEnd, MapEnd);
      // Unmap the extra memory if it's large enough.
      uptr Diff = NewMapBeg - MapBeg;
      if (Diff > PageSize)
        UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
      Diff = MapEnd - NewMapEnd;
      if (Diff > PageSize)
        UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
      MapBeg = NewMapBeg;
      MapSize = NewMapSize;
      MapEnd = NewMapEnd;
    }
    uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
    // For larger alignments, Alignment was added by the frontend to Size.
    if (Alignment > MinAlignment)
      UserEnd -= Alignment;
    CHECK_LE(UserEnd, MapEnd - PageSize);
    CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
        MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
    uptr Ptr = UserBeg - AlignedChunkHeaderSize;
    SecondaryHeader *Header = getHeader(Ptr);
    Header->MapBeg = MapBeg;
    Header->MapSize = MapSize;
    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we will do something similar here. But we will not account for
    // the guard pages.
    Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
    Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
    CHECK(IsAligned(UserBeg, Alignment));
    return reinterpret_cast<void *>(UserBeg);
  }

  void *ReturnNullOrDieOnBadRequest() {
    if (atomic_load(&MayReturnNull, memory_order_acquire))
      return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (atomic_load(&MayReturnNull, memory_order_acquire))
      return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool AllocatorMayReturnNull) {
    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    SecondaryHeader *Header = getHeader(Ptr);
    Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
    Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
    UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
  }

  uptr TotalMemoryUsed() {
    UNIMPLEMENTED();
  }

  bool PointerIsMine(const void *Ptr) {
    UNIMPLEMENTED();
  }

  uptr GetActuallyAllocatedSize(void *Ptr) {
    SecondaryHeader *Header = getHeader(Ptr);
    // Deduct PageSize as MapEnd includes the trailing guard page.
    uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
    return MapEnd - reinterpret_cast<uptr>(Ptr);
  }

  void *GetMetaData(const void *Ptr) {
    UNIMPLEMENTED();
  }

  void *GetBlockBegin(const void *Ptr) {
    UNIMPLEMENTED();
  }

  void *GetBlockBeginFastLocked(void *Ptr) {
    UNIMPLEMENTED();
  }

  void PrintStats() {
    UNIMPLEMENTED();
  }

  void ForceLock() {
    UNIMPLEMENTED();
  }

  void ForceUnlock() {
    UNIMPLEMENTED();
  }

  void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
    UNIMPLEMENTED();
  }

 private:
  // A Secondary allocated chunk header contains the base of the mapping and
  // its size. Currently, the base is always a page before the header, but
  // we might want to extend that number in the future based on the size of
  // the allocation.
  struct SecondaryHeader {
    uptr MapBeg;
    uptr MapSize;
  };
  // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
  COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);

  SecondaryHeader *getHeader(uptr Ptr) {
    return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
  }

  SecondaryHeader *getHeader(const void *Ptr) {
    return getHeader(reinterpret_cast<uptr>(Ptr));
  }

  uptr PageSize;
  atomic_uint8_t MayReturnNull;
};
#endif // SCUDO_ALLOCATOR_SECONDARY_H_