// iced_wgpu/image/atlas.rs

1pub mod entry;
2
3mod allocation;
4mod allocator;
5mod layer;
6
7pub use allocation::Allocation;
8pub use entry::Entry;
9pub use layer::Layer;
10
11use allocator::Allocator;
12
/// Side length, in pixels, of each (square) atlas layer.
pub const SIZE: u32 = 2048;
14
15use crate::core::Size;
16use crate::graphics::color;
17
18use std::sync::Arc;
19
/// A texture atlas that packs many images into the layers of a single
/// `SIZE`x`SIZE` 2D-array texture on the GPU.
#[derive(Debug)]
pub struct Atlas {
    // Backend in use; the GL backend needs layer-count workarounds
    // (see `new` and `grow`).
    backend: wgpu::Backend,
    // Backing 2D-array texture holding every layer's pixels.
    texture: wgpu::Texture,
    // `D2Array` view over `texture`, referenced by the bind group.
    texture_view: wgpu::TextureView,
    // Bind group exposing `texture_view` at binding 0.
    texture_bind_group: wgpu::BindGroup,
    // Layout kept around to rebuild the bind group whenever the atlas
    // grows and the texture is recreated.
    texture_layout: Arc<wgpu::BindGroupLayout>,
    // Per-layer allocation bookkeeping (empty / busy / full).
    layers: Vec<Layer>,
}
29
30impl Atlas {
31    pub fn new(
32        device: &wgpu::Device,
33        backend: wgpu::Backend,
34        texture_layout: Arc<wgpu::BindGroupLayout>,
35    ) -> Self {
36        let layers = match backend {
37            // On the GL backend we start with 2 layers, to help wgpu figure
38            // out that this texture is `GL_TEXTURE_2D_ARRAY` rather than `GL_TEXTURE_2D`
39            // https://github.com/gfx-rs/wgpu/blob/004e3efe84a320d9331371ed31fa50baa2414911/wgpu-hal/src/gles/mod.rs#L371
40            wgpu::Backend::Gl => vec![Layer::Empty, Layer::Empty],
41            _ => vec![Layer::Empty],
42        };
43
44        let extent = wgpu::Extent3d {
45            width: SIZE,
46            height: SIZE,
47            depth_or_array_layers: layers.len() as u32,
48        };
49
50        let texture = device.create_texture(&wgpu::TextureDescriptor {
51            label: Some("iced_wgpu::image texture atlas"),
52            size: extent,
53            mip_level_count: 1,
54            sample_count: 1,
55            dimension: wgpu::TextureDimension::D2,
56            format: if color::GAMMA_CORRECTION {
57                wgpu::TextureFormat::Rgba8UnormSrgb
58            } else {
59                wgpu::TextureFormat::Rgba8Unorm
60            },
61            usage: wgpu::TextureUsages::COPY_DST
62                | wgpu::TextureUsages::COPY_SRC
63                | wgpu::TextureUsages::TEXTURE_BINDING,
64            view_formats: &[],
65        });
66
67        let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
68            dimension: Some(wgpu::TextureViewDimension::D2Array),
69            ..Default::default()
70        });
71
72        let texture_bind_group =
73            device.create_bind_group(&wgpu::BindGroupDescriptor {
74                label: Some("iced_wgpu::image texture atlas bind group"),
75                layout: &texture_layout,
76                entries: &[wgpu::BindGroupEntry {
77                    binding: 0,
78                    resource: wgpu::BindingResource::TextureView(&texture_view),
79                }],
80            });
81
82        Atlas {
83            backend,
84            texture,
85            texture_view,
86            texture_bind_group,
87            texture_layout,
88            layers,
89        }
90    }
91
    /// Returns the bind group exposing the atlas texture to shaders.
    pub fn bind_group(&self) -> &wgpu::BindGroup {
        &self.texture_bind_group
    }
95
    /// Returns the current number of layers in the atlas.
    pub fn layer_count(&self) -> usize {
        self.layers.len()
    }
99
    /// Uploads the `4 * width * height` bytes of RGBA pixel `data` into
    /// the atlas, growing it with extra layers when allocation demands
    /// it.
    ///
    /// Returns the resulting [`Entry`], or `None` if space could not be
    /// allocated.
    pub fn upload(
        &mut self,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        width: u32,
        height: u32,
        data: &[u8],
    ) -> Option<Entry> {
        let entry = {
            let current_size = self.layers.len();
            let entry = self.allocate(width, height)?;

            // We grow the internal texture after allocating if necessary
            // (and before recording the copies below, so they target the
            // texture that will actually be sampled).
            let new_layers = self.layers.len() - current_size;
            self.grow(new_layers, device, encoder);

            entry
        };

        log::debug!("Allocated atlas entry: {entry:?}");

        // It is a webgpu requirement that:
        //   BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
        // So we calculate padded_width by rounding width up to the next
        // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
        let padding = (align - (4 * width) % align) % align;
        let padded_width = (4 * width + padding) as usize;
        let padded_data_size = padded_width * height as usize;

        // Re-pack the tightly-packed input rows into rows of
        // `padded_width` bytes; the tail padding of each row stays
        // zeroed.
        let mut padded_data = vec![0; padded_data_size];

        for row in 0..height as usize {
            let offset = row * padded_width;

            padded_data[offset..offset + 4 * width as usize].copy_from_slice(
                &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
            );
        }

        match &entry {
            Entry::Contiguous(allocation) => {
                self.upload_allocation(
                    &padded_data,
                    width,
                    height,
                    padding,
                    0,
                    allocation,
                    device,
                    encoder,
                );
            }
            Entry::Fragmented { fragments, .. } => {
                // Each fragment copies its own sub-rectangle out of the
                // shared padded buffer, starting at the byte offset of
                // its top-left pixel.
                for fragment in fragments {
                    let (x, y) = fragment.position;
                    let offset = (y * padded_width as u32 + 4 * x) as usize;

                    self.upload_allocation(
                        &padded_data,
                        width,
                        height,
                        padding,
                        offset,
                        &fragment.allocation,
                        device,
                        encoder,
                    );
                }
            }
        }

        if log::log_enabled!(log::Level::Debug) {
            log::debug!(
                "Atlas layers: {} (busy: {}, allocations: {})",
                self.layer_count(),
                self.layers.iter().filter(|layer| !layer.is_empty()).count(),
                self.layers.iter().map(Layer::allocations).sum::<usize>(),
            );
        }

        Some(entry)
    }
183
184    pub fn remove(&mut self, entry: &Entry) {
185        log::debug!("Removing atlas entry: {entry:?}");
186
187        match entry {
188            Entry::Contiguous(allocation) => {
189                self.deallocate(allocation);
190            }
191            Entry::Fragmented { fragments, .. } => {
192                for fragment in fragments {
193                    self.deallocate(&fragment.allocation);
194                }
195            }
196        }
197    }
198
    /// Reserves space for a `width` x `height` image, returning the
    /// resulting [`Entry`] or `None` when no space could be found.
    ///
    /// Strategy, in order:
    /// 1. An exactly `SIZE`x`SIZE` image claims a whole layer.
    /// 2. A larger image is split into `SIZE`-capped tiles, each
    ///    allocated recursively and recorded as a fragment.
    /// 3. Otherwise, first-fit into an existing empty or busy layer,
    ///    falling back to pushing a brand-new layer.
    fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
        // Allocate one layer if texture fits perfectly
        if width == SIZE && height == SIZE {
            let mut empty_layers = self
                .layers
                .iter_mut()
                .enumerate()
                .filter(|(_, layer)| layer.is_empty());

            if let Some((i, layer)) = empty_layers.next() {
                *layer = Layer::Full;

                return Some(Entry::Contiguous(Allocation::Full { layer: i }));
            }

            self.layers.push(Layer::Full);

            return Some(Entry::Contiguous(Allocation::Full {
                layer: self.layers.len() - 1,
            }));
        }

        // Split big textures across multiple layers
        if width > SIZE || height > SIZE {
            let mut fragments = Vec::new();
            let mut y = 0;

            while y < height {
                // Shadowing: this `height` is the current tile's height
                // only; the outer parameter is still used below.
                let height = std::cmp::min(height - y, SIZE);
                let mut x = 0;

                while x < width {
                    // Shadowed tile width, as above.
                    let width = std::cmp::min(width - x, SIZE);

                    let allocation = self.allocate(width, height)?;

                    // Tiles are at most `SIZE` per side, so the
                    // recursive call cannot take the fragmenting branch
                    // and always yields a contiguous allocation here.
                    if let Entry::Contiguous(allocation) = allocation {
                        fragments.push(entry::Fragment {
                            position: (x, y),
                            allocation,
                        });
                    }

                    x += width;
                }

                y += height;
            }

            // `width`/`height` are the outer parameters again: the full
            // image size, not a tile size.
            return Some(Entry::Fragmented {
                size: Size::new(width, height),
                fragments,
            });
        }

        // Try allocating on an existing layer
        for (i, layer) in self.layers.iter_mut().enumerate() {
            match layer {
                Layer::Empty => {
                    let mut allocator = Allocator::new(SIZE);

                    if let Some(region) = allocator.allocate(width, height) {
                        *layer = Layer::Busy(allocator);

                        return Some(Entry::Contiguous(Allocation::Partial {
                            region,
                            layer: i,
                        }));
                    }
                }
                Layer::Busy(allocator) => {
                    if let Some(region) = allocator.allocate(width, height) {
                        return Some(Entry::Contiguous(Allocation::Partial {
                            region,
                            layer: i,
                        }));
                    }
                }
                Layer::Full => {}
            }
        }

        // Create new layer with atlas allocator
        let mut allocator = Allocator::new(SIZE);

        if let Some(region) = allocator.allocate(width, height) {
            self.layers.push(Layer::Busy(allocator));

            return Some(Entry::Contiguous(Allocation::Partial {
                region,
                layer: self.layers.len() - 1,
            }));
        }

        // We ran out of memory (?)
        None
    }
296
297    fn deallocate(&mut self, allocation: &Allocation) {
298        log::debug!("Deallocating atlas: {allocation:?}");
299
300        match allocation {
301            Allocation::Full { layer } => {
302                self.layers[*layer] = Layer::Empty;
303            }
304            Allocation::Partial { layer, region } => {
305                let layer = &mut self.layers[*layer];
306
307                if let Layer::Busy(allocator) = layer {
308                    allocator.deallocate(region);
309
310                    if allocator.is_empty() {
311                        *layer = Layer::Empty;
312                    }
313                }
314            }
315        }
316    }
317
318    fn upload_allocation(
319        &mut self,
320        data: &[u8],
321        image_width: u32,
322        image_height: u32,
323        padding: u32,
324        offset: usize,
325        allocation: &Allocation,
326        device: &wgpu::Device,
327        encoder: &mut wgpu::CommandEncoder,
328    ) {
329        use wgpu::util::DeviceExt;
330
331        let (x, y) = allocation.position();
332        let Size { width, height } = allocation.size();
333        let layer = allocation.layer();
334
335        let extent = wgpu::Extent3d {
336            width,
337            height,
338            depth_or_array_layers: 1,
339        };
340
341        let buffer =
342            device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
343                label: Some("image upload buffer"),
344                contents: data,
345                usage: wgpu::BufferUsages::COPY_SRC,
346            });
347
348        encoder.copy_buffer_to_texture(
349            wgpu::TexelCopyBufferInfo {
350                buffer: &buffer,
351                layout: wgpu::TexelCopyBufferLayout {
352                    offset: offset as u64,
353                    bytes_per_row: Some(4 * image_width + padding),
354                    rows_per_image: Some(image_height),
355                },
356            },
357            wgpu::TexelCopyTextureInfo {
358                texture: &self.texture,
359                mip_level: 0,
360                origin: wgpu::Origin3d {
361                    x,
362                    y,
363                    z: layer as u32,
364                },
365                aspect: wgpu::TextureAspect::default(),
366            },
367            extent,
368        );
369    }
370
    /// Recreates the atlas texture to hold `self.layers.len()` layers
    /// (the last `amount` of which are new), copies every occupied
    /// pre-existing layer across, and rebuilds the view and bind
    /// group. No-op when `amount` is zero.
    fn grow(
        &mut self,
        amount: usize,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
    ) {
        if amount == 0 {
            return;
        }

        // On the GL backend if layers.len() == 6 we need to help wgpu figure out that this texture
        // is still a `GL_TEXTURE_2D_ARRAY` rather than `GL_TEXTURE_CUBE_MAP`. This will over-allocate
        // some unused memory on GL, but it's better than not being able to grow the atlas past a depth
        // of 6!
        // https://github.com/gfx-rs/wgpu/blob/004e3efe84a320d9331371ed31fa50baa2414911/wgpu-hal/src/gles/mod.rs#L371
        let depth_or_array_layers = match self.backend {
            wgpu::Backend::Gl if self.layers.len() == 6 => 7,
            _ => self.layers.len() as u32,
        };

        // The descriptor must match the original texture's format and
        // usage so the layer-to-layer copies below are valid.
        let new_texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("iced_wgpu::image texture atlas"),
            size: wgpu::Extent3d {
                width: SIZE,
                height: SIZE,
                depth_or_array_layers,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: if color::GAMMA_CORRECTION {
                wgpu::TextureFormat::Rgba8UnormSrgb
            } else {
                wgpu::TextureFormat::Rgba8Unorm
            },
            usage: wgpu::TextureUsages::COPY_DST
                | wgpu::TextureUsages::COPY_SRC
                | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });

        // Only the layers that existed before this grow have contents
        // worth copying; the `amount` freshly added ones hold nothing
        // yet.
        let amount_to_copy = self.layers.len() - amount;

        for (i, layer) in
            self.layers.iter_mut().take(amount_to_copy).enumerate()
        {
            // Empty layers have no pixels to preserve.
            if layer.is_empty() {
                continue;
            }

            encoder.copy_texture_to_texture(
                wgpu::TexelCopyTextureInfo {
                    texture: &self.texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d {
                        x: 0,
                        y: 0,
                        z: i as u32,
                    },
                    aspect: wgpu::TextureAspect::default(),
                },
                wgpu::TexelCopyTextureInfo {
                    texture: &new_texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d {
                        x: 0,
                        y: 0,
                        z: i as u32,
                    },
                    aspect: wgpu::TextureAspect::default(),
                },
                wgpu::Extent3d {
                    width: SIZE,
                    height: SIZE,
                    depth_or_array_layers: 1,
                },
            );
        }

        // Swap in the new texture, then rebuild the view and bind group
        // that referenced the old one.
        self.texture = new_texture;
        self.texture_view =
            self.texture.create_view(&wgpu::TextureViewDescriptor {
                dimension: Some(wgpu::TextureViewDimension::D2Array),
                ..Default::default()
            });

        self.texture_bind_group =
            device.create_bind_group(&wgpu::BindGroupDescriptor {
                label: Some("iced_wgpu::image texture atlas bind group"),
                layout: &self.texture_layout,
                entries: &[wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(
                        &self.texture_view,
                    ),
                }],
            });
    }
469}