pub mod entry;

mod allocation;
mod allocator;
mod layer;

pub use allocation::Allocation;
pub use entry::Entry;
pub use layer::Layer;

use allocator::Allocator;

/// The default width and height, in pixels, of each atlas layer.
pub const DEFAULT_SIZE: u32 = 2048;
/// The maximum width and height, in pixels, of each atlas layer.
pub const MAX_SIZE: u32 = 2048;

use crate::core::Size;
use crate::graphics::color;

use std::sync::Arc;

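/// A texture atlas that packs many images into the layers of a single
/// 2D array texture on the GPU.
///
/// Each layer is a `size` × `size` square; new layers are added, and the
/// underlying texture grown, as allocations require them.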
#[derive(Debug)]
pub struct Atlas {
    size: u32,
    backend: wgpu::Backend,
    texture: wgpu::Texture,
    texture_view: wgpu::TextureView,
    texture_bind_group: Arc<wgpu::BindGroup>,
    texture_layout: wgpu::BindGroupLayout,
    layers: Vec<Layer>,
}

impl Atlas {
    /// Creates a new [`Atlas`] of [`DEFAULT_SIZE`].
    pub fn new(
        device: &wgpu::Device,
        backend: wgpu::Backend,
        texture_layout: wgpu::BindGroupLayout,
    ) -> Self {
        Self::with_size(device, backend, texture_layout, DEFAULT_SIZE)
    }

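    /// Creates a new [`Atlas`] with the given size per side, clamped to
    /// [`MAX_SIZE`].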
    pub fn with_size(
        device: &wgpu::Device,
        backend: wgpu::Backend,
        texture_layout: wgpu::BindGroupLayout,
        size: u32,
    ) -> Self {
        let size = size.min(MAX_SIZE);

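        // NOTE: the GL backend starts with two layers, presumably so that wgpu
        // creates a `GL_TEXTURE_2D_ARRAY` rather than a plain `GL_TEXTURE_2D`;
        // every other backend starts with a single layer.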
        let layers = match backend {
            wgpu::Backend::Gl => vec![Layer::Empty, Layer::Empty],
            _ => vec![Layer::Empty],
        };

        let extent = wgpu::Extent3d {
            width: size,
            height: size,
            depth_or_array_layers: layers.len() as u32,
        };

        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("iced_wgpu::image texture atlas"),
            size: extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: if color::GAMMA_CORRECTION {
                wgpu::TextureFormat::Rgba8UnormSrgb
            } else {
                wgpu::TextureFormat::Rgba8Unorm
            },
            usage: wgpu::TextureUsages::COPY_DST
                | wgpu::TextureUsages::COPY_SRC
                | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });

        let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
            dimension: Some(wgpu::TextureViewDimension::D2Array),
            ..Default::default()
        });

        let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced_wgpu::image texture atlas bind group"),
            layout: &texture_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&texture_view),
            }],
        });

        Atlas {
            size,
            backend,
            texture,
            texture_view,
            texture_bind_group: Arc::new(texture_bind_group),
            texture_layout,
            layers,
        }
    }


    /// Returns the bind group of the atlas texture.
    pub fn bind_group(&self) -> &Arc<wgpu::BindGroup> {
        &self.texture_bind_group
    }

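    /// Uploads the given RGBA pixel data into the [`Atlas`], allocating the
    /// space it needs and growing the underlying texture whenever new layers
    /// are required.
    ///
    /// Returns `None` if the data could not be allocated.
    ///
    /// A rough usage sketch (assuming a prepared `device`, `encoder`, and
    /// staging `belt`):
    ///
    /// ```ignore
    /// if let Some(entry) = atlas.upload(device, encoder, belt, width, height, &rgba) {
    ///     // Keep `entry` around to locate the image in the atlas later.
    /// }
    /// ```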
    pub fn upload(
        &mut self,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        belt: &mut wgpu::util::StagingBelt,
        width: u32,
        height: u32,
        pixels: &[u8],
    ) -> Option<Entry> {
        let entry = {
            let current_size = self.layers.len();
            let entry = self.allocate(width, height)?;

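            // If the allocation created new layers, the texture has to be
            // rebuilt with the extra depth before any data is copied into it.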
            let new_layers = self.layers.len() - current_size;
            self.grow(new_layers, device, encoder, self.backend);

            entry
        };

        log::debug!("Allocated atlas entry: {entry:?}");

        match &entry {
            Entry::Contiguous(allocation) => {
                self.upload_allocation(pixels, width, 0, allocation, device, encoder, belt);
            }
            Entry::Fragmented { fragments, .. } => {
                for fragment in fragments {
                    let (x, y) = fragment.position;
                    // Byte offset of the fragment within the source image
                    // (4 bytes per RGBA pixel).
                    let offset = 4 * (y * width + x) as usize;

                    self.upload_allocation(
                        pixels,
                        width,
                        offset,
                        &fragment.allocation,
                        device,
                        encoder,
                        belt,
                    );
                }
            }
        }

        if log::log_enabled!(log::Level::Debug) {
            log::debug!(
                "Atlas layers: {} (busy: {}, allocations: {})",
                self.layers.len(),
                self.layers.iter().filter(|layer| !layer.is_empty()).count(),
                self.layers.iter().map(Layer::allocations).sum::<usize>(),
            );
        }

        Some(entry)
    }

    /// Removes the given [`Entry`] from the [`Atlas`], deallocating all of the
    /// space it occupied.
    pub fn remove(&mut self, entry: &Entry) {
        log::debug!("Removing atlas entry: {entry:?}");

        match entry {
            Entry::Contiguous(allocation) => {
                self.deallocate(allocation);
            }
            Entry::Fragmented { fragments, .. } => {
                for fragment in fragments {
                    self.deallocate(&fragment.allocation);
                }
            }
        }
    }

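    /// Finds room for an image of the given dimensions.
    ///
    /// An image that exactly matches the layer size takes over a whole layer,
    /// an image larger than a layer is split into fragments that are allocated
    /// recursively, and anything smaller is packed into an existing layer with
    /// free space or into a freshly created one.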
    fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
        if width == self.size && height == self.size {
            let mut empty_layers = self
                .layers
                .iter_mut()
                .enumerate()
                .filter(|(_, layer)| layer.is_empty());

            if let Some((i, layer)) = empty_layers.next() {
                *layer = Layer::Full;

                return Some(Entry::Contiguous(Allocation::Full {
                    layer: i,
                    size: self.size,
                }));
            }

            self.layers.push(Layer::Full);

            return Some(Entry::Contiguous(Allocation::Full {
                layer: self.layers.len() - 1,
                size: self.size,
            }));
        }

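        // Images larger than a single layer are split into fragments, each of
        // which is allocated on its own.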
        if width > self.size || height > self.size {
            let mut fragments = Vec::new();
            let mut y = 0;

            while y < height {
                let height = std::cmp::min(height - y, self.size);
                let mut x = 0;

                while x < width {
                    let width = std::cmp::min(width - x, self.size);

                    let allocation = self.allocate(width, height)?;

                    if let Entry::Contiguous(allocation) = allocation {
                        fragments.push(entry::Fragment {
                            position: (x, y),
                            allocation,
                        });
                    }

                    x += width;
                }

                y += height;
            }

            return Some(Entry::Fragmented {
                size: Size::new(width, height),
                fragments,
            });
        }

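        // Try to pack the image into an existing layer, turning an empty layer
        // into a busy one if needed.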
        for (i, layer) in self.layers.iter_mut().enumerate() {
            match layer {
                Layer::Empty => {
                    let mut allocator = Allocator::new(self.size);

                    if let Some(region) = allocator.allocate(width, height) {
                        *layer = Layer::Busy(allocator);

                        return Some(Entry::Contiguous(Allocation::Partial {
                            region,
                            layer: i,
                            atlas_size: self.size,
                        }));
                    }
                }
                Layer::Busy(allocator) => {
                    if let Some(region) = allocator.allocate(width, height) {
                        return Some(Entry::Contiguous(Allocation::Partial {
                            region,
                            layer: i,
                            atlas_size: self.size,
                        }));
                    }
                }
                Layer::Full => {}
            }
        }

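        // No existing layer had room, so add a brand new layer for the image.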
        let mut allocator = Allocator::new(self.size);

        if let Some(region) = allocator.allocate(width, height) {
            self.layers.push(Layer::Busy(allocator));

            return Some(Entry::Contiguous(Allocation::Partial {
                region,
                layer: self.layers.len() - 1,
                atlas_size: self.size,
            }));
        }

        None
    }

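    /// Frees the given [`Allocation`], marking its layer as empty once it no
    /// longer holds anything.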
    fn deallocate(&mut self, allocation: &Allocation) {
        log::debug!("Deallocating atlas: {allocation:?}");

        match allocation {
            Allocation::Full { layer, .. } => {
                self.layers[*layer] = Layer::Empty;
            }
            Allocation::Partial { layer, region, .. } => {
                let layer = &mut self.layers[*layer];

                if let Layer::Busy(allocator) = layer {
                    allocator.deallocate(region);

                    if allocator.is_empty() {
                        *layer = Layer::Empty;
                    }
                }
            }
        }
    }

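    /// Copies the pixels of a single [`Allocation`] into a staging buffer and
    /// schedules a copy from that buffer into the atlas texture.
    ///
    /// Rows are padded to `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`, and the image
    /// is surrounded by a border that repeats its edge pixels, presumably to
    /// avoid bleeding from neighboring entries when the atlas is sampled with
    /// filtering.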
    fn upload_allocation(
        &self,
        pixels: &[u8],
        image_width: u32,
        offset: usize,
        allocation: &Allocation,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        belt: &mut wgpu::util::StagingBelt,
    ) {
        let (x, y) = allocation.position();
        let Size { width, height } = allocation.size();
        let layer = allocation.layer();
        let padding = allocation.padding();

        // Buffer-to-texture copies require each row to be aligned to
        // `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`.
        let bytes_per_row = (4 * (width + padding.width * 2))
            .next_multiple_of(wgpu::COPY_BYTES_PER_ROW_ALIGNMENT)
            as usize;
        let total_bytes = bytes_per_row * (height + padding.height * 2) as usize;

        let buffer_slice = belt.allocate(
            wgpu::BufferSize::new(total_bytes as u64).unwrap(),
            wgpu::BufferSize::new(8 * 4).unwrap(),
            device,
        );

        const PIXEL: usize = 4;

        let mut fragment = buffer_slice.get_mapped_range_mut();
        let w = width as usize;
        let h = height as usize;
        let pad_w = padding.width as usize;
        let pad_h = padding.height as usize;
        let stride = PIXEL * w;

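        // Copy the image row by row, extending the first and last pixel of
        // each row into the horizontal padding.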
        for row in 0..h {
            let src = offset + row * PIXEL * image_width as usize;
            let dst = (row + pad_h) * bytes_per_row;

            fragment[dst + PIXEL * pad_w..dst + PIXEL * pad_w + stride]
                .copy_from_slice(&pixels[src..src + stride]);

            for i in 0..pad_w {
                fragment[dst + PIXEL * i..dst + PIXEL * (i + 1)]
                    .copy_from_slice(&pixels[src..src + PIXEL]);

                fragment
                    [dst + stride + PIXEL * (pad_w + i)..dst + stride + PIXEL * (pad_w + i + 1)]
                    .copy_from_slice(&pixels[src + stride - PIXEL..src + stride]);
            }
        }

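        // Fill the vertical padding by repeating the first and last image rows
        // (including their corner pixels) above and below the image.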
        for row in 0..pad_h {
            let dst_top = row * bytes_per_row;
            let dst_bottom = (pad_h + h + row) * bytes_per_row;
            let src_top = offset;
            let src_bottom = offset + (h - 1) * PIXEL * image_width as usize;

            fragment[dst_top + PIXEL * pad_w..dst_top + PIXEL * (pad_w + w)]
                .copy_from_slice(&pixels[src_top..src_top + PIXEL * w]);

            fragment[dst_bottom + PIXEL * pad_w..dst_bottom + PIXEL * (pad_w + w)]
                .copy_from_slice(&pixels[src_bottom..src_bottom + PIXEL * w]);

            for i in 0..pad_w {
                fragment[dst_top + PIXEL * i..dst_top + PIXEL * (i + 1)]
                    .copy_from_slice(&pixels[offset..offset + PIXEL]);

                fragment[dst_top + PIXEL * (w + pad_w + i)..dst_top + PIXEL * (w + pad_w + i + 1)]
                    .copy_from_slice(&pixels[offset + PIXEL * (w - 1)..offset + PIXEL * w]);

                fragment[dst_bottom + PIXEL * i..dst_bottom + PIXEL * (i + 1)]
                    .copy_from_slice(&pixels[src_bottom..src_bottom + PIXEL]);

                fragment[dst_bottom + PIXEL * (w + pad_w + i)
                    ..dst_bottom + PIXEL * (w + pad_w + i + 1)]
                    .copy_from_slice(&pixels[src_bottom + PIXEL * (w - 1)..src_bottom + PIXEL * w]);
            }
        }

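        // Schedule the copy from the staging buffer into the atlas texture,
        // covering the image together with its padding border.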
        encoder.copy_buffer_to_texture(
            wgpu::TexelCopyBufferInfo {
                buffer: buffer_slice.buffer(),
                layout: wgpu::TexelCopyBufferLayout {
                    offset: buffer_slice.offset(),
                    bytes_per_row: Some(bytes_per_row as u32),
                    rows_per_image: Some(height + padding.height * 2),
                },
            },
            wgpu::TexelCopyTextureInfo {
                texture: &self.texture,
                mip_level: 0,
                origin: wgpu::Origin3d {
                    x: x - padding.width,
                    y: y - padding.height,
                    z: layer as u32,
                },
                aspect: wgpu::TextureAspect::default(),
            },
            wgpu::Extent3d {
                width: width + padding.width * 2,
                height: height + padding.height * 2,
                depth_or_array_layers: 1,
            },
        );
    }

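    /// Recreates the atlas texture with room for the current number of layers
    /// and copies every pre-existing, non-empty layer into it.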
    fn grow(
        &mut self,
        amount: usize,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        backend: wgpu::Backend,
    ) {
        if amount == 0 {
            return;
        }

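        // NOTE: the extra layer on the GL backend when the layer count is a
        // multiple of 6 is presumably a workaround to keep the array texture
        // from being interpreted as a cube map (or cube map array), which
        // always has a multiple of 6 layers.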
        let depth_or_array_layers = match backend {
            wgpu::Backend::Gl if self.layers.len().is_multiple_of(6) => {
                self.layers.len() as u32 + 1
            }
            _ => self.layers.len() as u32,
        };

        let new_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("iced_wgpu::image texture atlas"),
            size: wgpu::Extent3d {
                width: self.size,
                height: self.size,
                depth_or_array_layers,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: if color::GAMMA_CORRECTION {
                wgpu::TextureFormat::Rgba8UnormSrgb
            } else {
                wgpu::TextureFormat::Rgba8Unorm
            },
            usage: wgpu::TextureUsages::COPY_DST
                | wgpu::TextureUsages::COPY_SRC
                | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });

        let amount_to_copy = self.layers.len() - amount;

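        // Copy every pre-existing, non-empty layer from the old texture into
        // the new one; freshly added layers have nothing to copy yet.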
        for (i, layer) in self.layers.iter_mut().take(amount_to_copy).enumerate() {
            if layer.is_empty() {
                continue;
            }

            encoder.copy_texture_to_texture(
                wgpu::TexelCopyTextureInfo {
                    texture: &self.texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d {
                        x: 0,
                        y: 0,
                        z: i as u32,
                    },
                    aspect: wgpu::TextureAspect::default(),
                },
                wgpu::TexelCopyTextureInfo {
                    texture: &new_texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d {
                        x: 0,
                        y: 0,
                        z: i as u32,
                    },
                    aspect: wgpu::TextureAspect::default(),
                },
                wgpu::Extent3d {
                    width: self.size,
                    height: self.size,
                    depth_or_array_layers: 1,
                },
            );
        }

        self.texture = new_texture;
        self.texture_view = self.texture.create_view(&wgpu::TextureViewDescriptor {
            dimension: Some(wgpu::TextureViewDimension::D2Array),
            ..Default::default()
        });

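        // The bind group still refers to the old texture view, so it has to be
        // rebuilt to point at the new one.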
        self.texture_bind_group = Arc::new(device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("iced_wgpu::image texture atlas bind group"),
            layout: &self.texture_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureView(&self.texture_view),
            }],
        }));
    }
}