large object allocator

The allocator types used by the Heap member functions AllocObject and AllocNonMovableObject are determined by the member variables current_allocator_ and current_non_moving_allocator_, respectively. The former depends on the GC type currently in use: when the GC type changes, Heap::ChangeCollector is called to switch the collector, which in turn calls Heap::ChangeAllocator to update current_allocator_. Since the ART runtime has only one Non-Moving Space, the latter is fixed at kAllocatorTypeNonMoving.
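
Which allocator each collector selects can be seen in Heap::ChangeCollector. Below is a heavily abbreviated sketch of the ART 6.0-era logic; treat it as illustrative, since the exact cases differ between releases:

void Heap::ChangeCollector(CollectorType collector_type) {
  // ...
  switch (collector_type) {
    case kCollectorTypeSS:   // Semi-space collectors allocate with a bump
    case kCollectorTypeGSS:  // pointer, optionally through per-thread TLABs.
      ChangeAllocator(use_tlab_ ? kAllocatorTypeTLAB : kAllocatorTypeBumpPointer);
      break;
    case kCollectorTypeMS:   // Mark-sweep collectors allocate from the main
    case kCollectorTypeCMS:  // malloc space (RosAlloc or dlmalloc).
      ChangeAllocator(kUseRosAlloc ? kAllocatorTypeRosAlloc : kAllocatorTypeDlMalloc);
      break;
    case kCollectorTypeCC:   // Concurrent copying uses the region space.
      ChangeAllocator(use_tlab_ ? kAllocatorTypeRegionTLAB : kAllocatorTypeRegion);
      break;
    default:
      LOG(FATAL) << "Unimplemented collector type";
  }
  // ...
}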

Whether an object is allocated through the AllocObject interface or the AllocNonMovableObject interface, the actual work is ultimately funneled through a single interface, AllocObjectWithAllocator, which performs the concrete allocation:

Array* Array::Alloc()
Object* Class::Alloc()
mirror::Object* AllocObject()
mirror::Object* AllocNonMovableObject()
String* String::Alloc()
All five of these functions end up calling AllocObjectWithAllocator() to perform the allocation.
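
The two Heap entry points are thin inline wrappers that differ only in which allocator they pass down (abbreviated from heap.h, locking annotations omitted):

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                         const PreFenceVisitor& pre_fence_visitor) {
  // Uses whatever allocator the current GC configured (current_allocator_).
  return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                       GetCurrentAllocator(),
                                                       pre_fence_visitor);
}

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocNonMovableObject(Thread* self, mirror::Class* klass,
                                                   size_t num_bytes,
                                                   const PreFenceVisitor& pre_fence_visitor) {
  // Always allocates with current_non_moving_allocator_, i.e. kAllocatorTypeNonMoving.
  return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                       GetCurrentNonMovingAllocator(),
                                                       pre_fence_visitor);
}

Note that both pass true for kCheckLargeObject, so the large-object shortcut described next applies to both of them.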

First, if the template parameter kCheckLargeObject is true, and the object being allocated is a primitive-type array (or a string) whose size reaches a preset threshold, then the allocator parameter is ignored and another Heap member function, AllocLargeObject, is called to allocate the memory directly in the Large Object Space.

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check we would have an infinite loop.
  mirror::Object* obj;
  // Check whether this allocation should go to the large object space.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    // Allocate in the large object space. AllocLargeObject() re-enters this
    // function with kCheckLargeObject == false and allocator == kAllocatorTypeLOS,
    // so the recursive call takes the else branch below.
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj;
    } else {
      // There should be an OOM exception, since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  AllocationTimer alloc_timer(this, &obj);
  // bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
             LIKELY(obj != nullptr)) {
    DCHECK(!running_on_valgrind_);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // bytes allocated that takes bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      bool is_current_allocator = allocator == GetCurrentAllocator();
      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                   &bytes_tl_bulk_allocated, &klass);
      if (obj == nullptr) {
        bool after_is_current_allocator = allocator == GetCurrentAllocator();
        // If there is a pending exception, fail the allocation right away since the next one
        // could cause OOM and abort the runtime.
        if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
          // If the allocator changed, we need to restart the allocation.
          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
 
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}
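
AllocLargeObject itself is a thin wrapper that re-enters AllocObjectWithAllocator with kCheckLargeObject == false and kAllocatorTypeLOS as the allocator, which is what breaks the potential infinite loop mentioned in the comment above (abbreviated from heap-inl.h):

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class pointer in case the allocation triggers a GC
  // that moves the class object.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

The check that decides whether an allocation counts as a large object is ShouldAllocLargeObject: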

 

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
large_object_threshold_ defaults to 12288 bytes = 12 KB = 3 * kPageSize (with 4 KB pages). So, for example, a 16 KB int[] is allocated in the Large Object Space, while a 16 KB Object[] fails the IsPrimitiveArray() check and is allocated in the regular spaces.
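
The default comes from a constant in heap.h (ART 6.0-era; the threshold is also configurable through the Heap constructor):

// runtime/gc/heap.h (abbreviated): the default large-object threshold.
// 3 pages == 12288 bytes with the usual 4 KB page size.
static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;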
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
  kAllocatorTypeTLAB,  // Use TLAB allocator, has entrypoints.
  kAllocatorTypeRosAlloc,  // Use RosAlloc allocator, has entrypoints.
  kAllocatorTypeDlMalloc,  // Use dlmalloc allocator, has entrypoints.
  kAllocatorTypeNonMoving,  // Special allocator for non moving objects, doesn't have entrypoints.
  kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
  kAllocatorTypeRegion,  // Use region space allocator.
  kAllocatorTypeRegionTLAB,  // Use region space allocator with TLAB.
};

AllocatorType has eight values (the last two were added for the concurrent copying collector), with the following meanings:
       kAllocatorTypeBumpPointer: allocate in the Bump Pointer Space.
       kAllocatorTypeTLAB: allocate in a thread-local allocation buffer carved out of the Bump Pointer Space.
       kAllocatorTypeRosAlloc: allocate in the RosAlloc Space.
       kAllocatorTypeDlMalloc: allocate in the DlMalloc Space.
       kAllocatorTypeNonMoving: allocate in the Non-Moving Space.
       kAllocatorTypeLOS: allocate in the Large Object Space.
       kAllocatorTypeRegion: allocate in the Region Space.
       kAllocatorTypeRegionTLAB: allocate in a thread-local allocation buffer carved out of the Region Space.

 

Whichever allocator is selected, the actual allocation goes through TryToAllocate:

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      // ...
      break;
    }
    case kAllocatorTypeRosAlloc: {
      // ...
      break;
    }
    case kAllocatorTypeDlMalloc: {
      // ...
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      // ...
      break;
    }
    case kAllocatorTypeRegion: {
      // ...
      break;
    }
    case kAllocatorTypeRegionTLAB: {
      // ...
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}
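
Which concrete implementation backs large_object_space_ is decided when the Heap is constructed: ART ships a map-based LOS (LargeObjectMapSpace, one anonymous mapping per object) and a free-list-based one (FreeListSpace, a single large reserved mapping). A rough sketch of the selection in the Heap constructor (abbreviated; the option plumbing differs between versions):

// Sketch of the LOS selection in the Heap constructor (runtime/gc/heap.cc).
if (large_object_space_type == space::LargeObjectSpaceType::kFreeList) {
  large_object_space_ = space::FreeListSpace::Create("free list large object space",
                                                     nullptr, capacity_);
} else if (large_object_space_type == space::LargeObjectSpaceType::kMap) {
  large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
} else {
  large_object_space_ = nullptr;  // kDisabled: no LOS at all.
}

The rest of this section follows the map-based implementation, whose Alloc is shown below.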

 

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
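  // Each large object is backed by its own dedicated anonymous mapping.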
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
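  // Debug-only check: the new mapping must not overlap any continuous space bitmap.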
  if (kIsDebugBuild) {
    ReaderMutexLock mu2(Thread::Current(), *Locks::heap_bitmap_lock_);
    auto* heap = Runtime::Current()->GetHeap();
    auto* live_bitmap = heap->GetLiveBitmap();
    auto* space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj);
    CHECK(space_bitmap == nullptr) << obj << " overlaps with bitmap " << *space_bitmap;
    auto* obj_end = reinterpret_cast<mirror::Object*>(mem_map->End());
    space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj_end - 1);
    CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
  }
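  // Register the mapping and update the space's bounds and statistics under lock.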
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);
  begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
  uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}
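
The deallocation path mirrors this: Free looks the object up in large_objects_ and deletes its MemMap, returning the pages to the kernel. Here is a sketch of LargeObjectMapSpace::Free, assuming the same bookkeeping fields as Alloc above (abbreviated, error handling trimmed):

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, allocation_size);
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;  // Unmaps the pages immediately.
  large_objects_.erase(it);
  return allocation_size;
}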

 
