use {
crate::ranges::*,
gfx_hal::{
device::OutOfMemory,
pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags},
Backend, Device,
},
smallvec::{smallvec, SmallVec},
std::{
collections::{HashMap, VecDeque},
ops::Deref,
},
};
/// Lower bound on the set capacity of any newly created descriptor pool
/// (see `DescriptorBucket::new_pool_size`).
const MIN_SETS: u32 = 64;
/// Cap on how much the bucket's live-set total may inflate a new pool's
/// size; actual pool size is still rounded up to a power of two.
const MAX_SETS: u32 = 512;
/// Descriptor set handed out by `DescriptorAllocator`.
///
/// Carries enough bookkeeping (`pool`, `ranges`) for the allocator to route
/// the set back to the bucket and pool it came from when it is freed.
#[derive(Debug)]
pub struct DescriptorSet<B: Backend> {
    /// Raw backend descriptor set.
    raw: B::DescriptorSet,
    /// Global index of the owning pool within its bucket
    /// (offset by the bucket's `pools_offset`).
    pool: u64,
    /// Descriptor ranges of the layout this set was allocated with;
    /// used as the bucket key on free.
    ranges: DescriptorRanges,
}
impl<B: Backend> DescriptorSet<B> {
    /// Borrow the raw backend descriptor set.
    pub fn raw(&self) -> &B::DescriptorSet {
        &self.raw
    }

    /// Mutably borrow the raw backend descriptor set.
    ///
    /// # Safety
    ///
    /// The caller must not replace or invalidate the underlying set; the
    /// allocator still tracks it by its `pool` and `ranges` bookkeeping.
    pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet {
        &mut self.raw
    }
}
impl<B: Backend> Deref for DescriptorSet<B> {
    type Target = B::DescriptorSet;

    /// Dereference straight to the raw backend descriptor set.
    fn deref(&self) -> &Self::Target {
        &self.raw
    }
}
/// Scratch space for an in-flight allocation.
///
/// `sets` and `pools` are kept in lock-step: `pools[i]` is the global pool
/// index that `sets[i]` was allocated from.
#[derive(Debug)]
struct Allocation<B: Backend> {
    /// Raw sets allocated so far in the current call.
    sets: Vec<B::DescriptorSet>,
    /// For each entry of `sets`, the global index of its source pool.
    pools: Vec<u64>,
}
/// A backend descriptor pool together with its bookkeeping counters.
#[derive(derivative::Derivative)]
#[derivative(Debug)]
struct DescriptorPool<B: Backend> {
    // Raw handle is excluded from Debug output (backend type may not be Debug).
    #[derivative(Debug = "ignore")]
    raw: B::DescriptorPool,
    /// Total number of sets this pool was created to hold.
    size: u32,
    /// Sets not yet allocated from this pool.
    free: u32,
    /// Sets allocated and later returned. Freed sets are not recycled;
    /// the pool is destroyed by `cleanup` once `freed` reaches `size`.
    freed: u32,
}
/// Allocate `count` descriptor sets with `layout` from the pool `raw`,
/// appending them to `allocation`.
///
/// Host/device exhaustion is mapped to `OutOfMemory`; any other
/// `AllocationError` variant is treated as a programmer error and panics.
unsafe fn allocate_from_pool<B: Backend>(
    raw: &mut B::DescriptorPool,
    layout: &B::DescriptorSetLayout,
    count: u32,
    allocation: &mut Vec<B::DescriptorSet>,
) -> Result<(), OutOfMemory> {
    let len_before = allocation.len();
    let layouts = std::iter::repeat(layout).take(count as usize);
    if let Err(err) = raw.allocate_sets(layouts, allocation) {
        return Err(match err {
            AllocationError::OutOfHostMemory => OutOfMemory::OutOfHostMemory,
            AllocationError::OutOfDeviceMemory => OutOfMemory::OutOfDeviceMemory,
            other => {
                panic!("Unexpected error: {:?}", other);
            }
        });
    }
    // The backend must have appended exactly `count` sets.
    assert_eq!(allocation.len(), len_before + count as usize);
    Ok(())
}
/// All descriptor pools created for one `DescriptorRanges` key,
/// in creation order.
#[derive(Debug)]
struct DescriptorBucket<B: Backend> {
    /// Global index of `pools[0]`. Incremented as fully-freed pools are
    /// destroyed from the front, so previously recorded global pool
    /// indices remain valid.
    pools_offset: u64,
    /// Live pools, oldest at the front.
    pools: VecDeque<DescriptorPool<B>>,
    /// Number of sets currently allocated (and not yet freed) from this bucket.
    total: u64,
}
impl<B> DescriptorBucket<B>
where
    B: Backend,
{
    /// Create an empty bucket with no pools.
    fn new() -> Self {
        DescriptorBucket {
            pools_offset: 0,
            pools: VecDeque::new(),
            total: 0,
        }
    }

    /// Pick the size for the next pool: at least `MIN_SETS` and at least
    /// `count`, scaled up with the bucket's live total (capped at
    /// `MAX_SETS`), rounded to the next power of two.
    fn new_pool_size(&self, count: u32) -> u32 {
        MIN_SETS
            .max(count)
            .max(self.total.min(MAX_SETS as u64) as u32)
            .next_power_of_two()
    }

    /// Destroy this bucket's pools.
    ///
    /// A pool that still has live sets (`freed + free < size`) is only
    /// reported via `log::error!` and its raw handle is dropped without
    /// `destroy_descriptor_pool` — destroying it would invalidate sets
    /// still in use.
    unsafe fn dispose(mut self, device: &B::Device) {
        if self.total > 0 {
            log::error!("Not all descriptor sets were deallocated");
        }
        while let Some(pool) = self.pools.pop_front() {
            // Counters can never exceed the pool's capacity.
            assert!(pool.freed + pool.free <= pool.size);
            if pool.freed + pool.free < pool.size {
                log::error!(
                    "Descriptor pool is still in use during allocator disposal. {:?}",
                    pool
                );
            } else {
                log::trace!("Destroying used up descriptor pool");
                device.destroy_descriptor_pool(pool.raw);
            }
        }
        // NOTE(review): a trailing `self.pools.drain(..)` destroy loop was
        // removed — the `while let … pop_front()` loop above always empties
        // the deque, so it could never execute. The `pools_offset` increment
        // in the loop was dropped for the same reason: `self` is consumed
        // here, so the counter is never read again.
    }

    /// Allocate `count` sets of `layout` into `allocation.sets`, recording
    /// each set's global pool index in `allocation.pools`.
    ///
    /// Existing pools with spare capacity are used first (iterated
    /// newest-first); new pools sized by `new_pool_size` are created for
    /// the remainder.
    unsafe fn allocate(
        &mut self,
        device: &B::Device,
        layout: &B::DescriptorSetLayout,
        layout_ranges: DescriptorRanges,
        mut count: u32,
        allocation: &mut Allocation<B>,
    ) -> Result<(), OutOfMemory> {
        if count == 0 {
            return Ok(());
        }
        for (index, pool) in self.pools.iter_mut().enumerate().rev() {
            if pool.free == 0 {
                continue;
            }
            let allocate = pool.free.min(count);
            log::trace!("Allocate {} from existing pool", allocate);
            allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
            // One global pool index per set just allocated.
            allocation.pools.extend(
                std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
            );
            count -= allocate;
            pool.free -= allocate;
            self.total += allocate as u64;
            if count == 0 {
                return Ok(());
            }
        }
        while count > 0 {
            let size = self.new_pool_size(count);
            let pool_ranges = layout_ranges * size;
            log::trace!(
                "Create new pool with {} sets and {:?} descriptors",
                size,
                pool_ranges,
            );
            let raw = device.create_descriptor_pool(
                size as usize,
                &pool_ranges,
                DescriptorPoolCreateFlags::empty(),
            )?;
            let allocate = size.min(count);
            self.pools.push_back(DescriptorPool {
                raw,
                size,
                free: size,
                freed: 0,
            });
            let index = self.pools.len() - 1;
            let pool = self.pools.back_mut().unwrap();
            allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
            allocation.pools.extend(
                std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
            );
            count -= allocate;
            pool.free -= allocate;
            self.total += allocate as u64;
        }
        Ok(())
    }

    /// Mark `sets` — all allocated from global pool index `pool` — as freed.
    ///
    /// The raw sets are simply dropped (not recycled); `cleanup` destroys
    /// the pool once its `freed` counter reaches `size`.
    unsafe fn free(&mut self, sets: impl IntoIterator<Item = B::DescriptorSet>, pool: u64) {
        // Translate global pool index to a position in the deque.
        let pool = &mut self.pools[(pool - self.pools_offset) as usize];
        let freed = sets.into_iter().count() as u32;
        pool.freed += freed;
        self.total -= freed as u64;
        log::trace!("Freed {} from descriptor bucket", freed);
    }

    /// Destroy fully-freed pools from the front of the queue, advancing
    /// `pools_offset` so recorded global pool indices stay valid.
    unsafe fn cleanup(&mut self, device: &B::Device) {
        while let Some(pool) = self.pools.pop_front() {
            if pool.freed < pool.size {
                // Oldest pool still has live (or never-allocated) sets;
                // nothing further can be reclaimed.
                self.pools.push_front(pool);
                break;
            }
            log::trace!("Destroying used up descriptor pool");
            device.destroy_descriptor_pool(pool.raw);
            self.pools_offset += 1;
        }
    }
}
/// Allocator for descriptor sets that groups descriptor pools into buckets
/// keyed by `DescriptorRanges`.
#[derive(Debug)]
pub struct DescriptorAllocator<B: Backend> {
    /// One bucket of pools per distinct layout-ranges value.
    buckets: HashMap<DescriptorRanges, DescriptorBucket<B>>,
    /// Scratch storage reused across `allocate` calls.
    allocation: Allocation<B>,
    /// Token from the `relevant` crate; consumed in `dispose`.
    relevant: relevant::Relevant,
    /// NOTE(review): initialized to 0 in `new` and never updated anywhere
    /// in this file — looks vestigial; confirm before relying on it.
    total: u64,
}
impl<B> DescriptorAllocator<B>
where
    B: Backend,
{
    /// Create a new allocator with no pools.
    pub fn new() -> Self {
        DescriptorAllocator {
            buckets: HashMap::new(),
            allocation: Allocation {
                sets: Vec::new(),
                pools: Vec::new(),
            },
            relevant: relevant::Relevant,
            total: 0,
        }
    }

    /// Destroy all pools owned by this allocator.
    ///
    /// # Safety
    ///
    /// All sets allocated from this allocator should have been returned
    /// via [`Self::free`]; buckets log errors for sets still in flight.
    pub unsafe fn dispose(mut self, device: &B::Device) {
        self.buckets
            .drain()
            .for_each(|(_, bucket)| bucket.dispose(device));
        self.relevant.dispose();
    }

    /// Allocate `count` descriptor sets with `layout`, pushing them into
    /// `extend`.
    ///
    /// `layout_ranges` must describe `layout`; it is used as the bucket
    /// key. On error, every set allocated during this call is returned to
    /// the bucket before the error is propagated.
    pub unsafe fn allocate(
        &mut self,
        device: &B::Device,
        layout: &B::DescriptorSetLayout,
        layout_ranges: DescriptorRanges,
        count: u32,
        extend: &mut impl Extend<DescriptorSet<B>>,
    ) -> Result<(), OutOfMemory> {
        if count == 0 {
            return Ok(());
        }
        log::trace!(
            "Allocating {} sets with layout {:?} @ {:?}",
            count,
            layout,
            layout_ranges
        );
        let bucket = self
            .buckets
            .entry(layout_ranges)
            .or_insert_with(DescriptorBucket::new);
        match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) {
            Ok(()) => {
                // Pair each raw set with its recorded pool index and hand
                // them out as tracked `DescriptorSet`s.
                extend.extend(
                    Iterator::zip(
                        self.allocation.pools.drain(..),
                        self.allocation.sets.drain(..),
                    )
                    .map(|(pool, set)| DescriptorSet {
                        raw: set,
                        ranges: layout_ranges,
                        pool,
                    }),
                );
                Ok(())
            }
            Err(err) => {
                // Roll back: `allocation.pools[i]` is the pool of
                // `allocation.sets[i]`; walk backwards and free each
                // contiguous same-pool run of sets in a single call.
                let mut last = None;
                for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() {
                    match last {
                        Some(prev) if prev == pool => {
                            // Still inside the current run.
                        }
                        Some(prev) => {
                            // Run boundary: sets after `index` came from `prev`.
                            bucket.free(self.allocation.sets.drain(index + 1..), prev);
                            // BUGFIX: start tracking the new run's pool.
                            // Previously `last` was left unchanged here, so
                            // once the allocation spanned more than one
                            // pool, every later run was freed against the
                            // wrong pool, corrupting per-pool `freed`
                            // counters.
                            last = Some(pool);
                        }
                        None => last = Some(pool),
                    }
                }
                if let Some(last) = last {
                    // Flush the final (front-most) run.
                    bucket.free(self.allocation.sets.drain(0..), last);
                }
                Err(err)
            }
        }
    }

    /// Return descriptor sets to their buckets.
    ///
    /// Sets may come from different layouts and pools; consecutive sets
    /// sharing the same ranges and pool are batched into one bucket-free
    /// call.
    pub unsafe fn free(&mut self, all_sets: impl IntoIterator<Item = DescriptorSet<B>>) {
        let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None;
        for set in all_sets {
            match &mut free {
                slot @ None => {
                    slot.replace((set.ranges, set.pool, smallvec![set.raw]));
                }
                Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => {
                    raw_sets.push(set.raw);
                }
                Some((ranges, pool, raw_sets)) => {
                    // Batch boundary: flush the accumulated run, then start
                    // a new one with the current set.
                    let bucket = self
                        .buckets
                        .get_mut(ranges)
                        .expect("Set should be allocated from this allocator");
                    debug_assert!(bucket.total >= raw_sets.len() as u64);
                    bucket.free(raw_sets.drain(), *pool);
                    *pool = set.pool;
                    *ranges = set.ranges;
                    raw_sets.push(set.raw);
                }
            }
        }
        if let Some((ranges, pool, raw_sets)) = free {
            let bucket = self
                .buckets
                .get_mut(&ranges)
                .expect("Set should be allocated from this allocator");
            debug_assert!(bucket.total >= raw_sets.len() as u64);
            bucket.free(raw_sets, pool);
        }
    }

    /// Destroy exhausted pools in every bucket.
    pub unsafe fn cleanup(&mut self, device: &B::Device) {
        self.buckets
            .values_mut()
            .for_each(|bucket| bucket.cleanup(device));
    }
}