1pub mod entry;
2
3mod allocation;
4mod allocator;
5mod layer;
6
7pub use allocation::Allocation;
8pub use entry::Entry;
9pub use layer::Layer;
10
11use allocator::Allocator;
12
/// The width and height, in pixels, of every layer of the texture atlas.
pub const SIZE: u32 = 2048;
14
15use crate::core::Size;
16use crate::graphics::color;
17
18use std::sync::Arc;
19
/// A texture atlas: a growable array texture whose [`SIZE`]x[`SIZE`] layers
/// are sub-allocated to hold many images.
#[derive(Debug)]
pub struct Atlas {
    // The GPU array texture backing every layer of the atlas.
    texture: wgpu::Texture,
    // `D2Array` view over `texture`; referenced by `texture_bind_group`.
    texture_view: wgpu::TextureView,
    // Bind group exposing `texture_view` to shaders; rebuilt on growth.
    texture_bind_group: wgpu::BindGroup,
    // Layout kept around so the bind group can be recreated when the
    // texture is replaced by a larger one.
    texture_layout: Arc<wgpu::BindGroupLayout>,
    // Allocation state of each layer of `texture` (same order as the
    // texture's array layers).
    layers: Vec<Layer>,
}
28
29impl Atlas {
30 pub fn new(
31 device: &wgpu::Device,
32 backend: wgpu::Backend,
33 texture_layout: Arc<wgpu::BindGroupLayout>,
34 ) -> Self {
35 let layers = match backend {
36 wgpu::Backend::Gl => vec![Layer::Empty, Layer::Empty],
40 _ => vec![Layer::Empty],
41 };
42
43 let extent = wgpu::Extent3d {
44 width: SIZE,
45 height: SIZE,
46 depth_or_array_layers: layers.len() as u32,
47 };
48
49 let texture = device.create_texture(&wgpu::TextureDescriptor {
50 label: Some("iced_wgpu::image texture atlas"),
51 size: extent,
52 mip_level_count: 1,
53 sample_count: 1,
54 dimension: wgpu::TextureDimension::D2,
55 format: if color::GAMMA_CORRECTION {
56 wgpu::TextureFormat::Rgba8UnormSrgb
57 } else {
58 wgpu::TextureFormat::Rgba8Unorm
59 },
60 usage: wgpu::TextureUsages::COPY_DST
61 | wgpu::TextureUsages::COPY_SRC
62 | wgpu::TextureUsages::TEXTURE_BINDING,
63 view_formats: &[],
64 });
65
66 let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
67 dimension: Some(wgpu::TextureViewDimension::D2Array),
68 ..Default::default()
69 });
70
71 let texture_bind_group =
72 device.create_bind_group(&wgpu::BindGroupDescriptor {
73 label: Some("iced_wgpu::image texture atlas bind group"),
74 layout: &texture_layout,
75 entries: &[wgpu::BindGroupEntry {
76 binding: 0,
77 resource: wgpu::BindingResource::TextureView(&texture_view),
78 }],
79 });
80
81 Atlas {
82 texture,
83 texture_view,
84 texture_bind_group,
85 texture_layout,
86 layers,
87 }
88 }
89
90 pub fn bind_group(&self) -> &wgpu::BindGroup {
91 &self.texture_bind_group
92 }
93
94 pub fn layer_count(&self) -> usize {
95 self.layers.len()
96 }
97
98 pub fn upload(
99 &mut self,
100 device: &wgpu::Device,
101 encoder: &mut wgpu::CommandEncoder,
102 width: u32,
103 height: u32,
104 data: &[u8],
105 ) -> Option<Entry> {
106 let entry = {
107 let current_size = self.layers.len();
108 let entry = self.allocate(width, height)?;
109
110 let new_layers = self.layers.len() - current_size;
112 self.grow(new_layers, device, encoder);
113
114 entry
115 };
116
117 log::debug!("Allocated atlas entry: {entry:?}");
118
119 let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT;
124 let padding = (align - (4 * width) % align) % align;
125 let padded_width = (4 * width + padding) as usize;
126 let padded_data_size = padded_width * height as usize;
127
128 let mut padded_data = vec![0; padded_data_size];
129
130 for row in 0..height as usize {
131 let offset = row * padded_width;
132
133 padded_data[offset..offset + 4 * width as usize].copy_from_slice(
134 &data[row * 4 * width as usize..(row + 1) * 4 * width as usize],
135 );
136 }
137
138 match &entry {
139 Entry::Contiguous(allocation) => {
140 self.upload_allocation(
141 &padded_data,
142 width,
143 height,
144 padding,
145 0,
146 allocation,
147 device,
148 encoder,
149 );
150 }
151 Entry::Fragmented { fragments, .. } => {
152 for fragment in fragments {
153 let (x, y) = fragment.position;
154 let offset = (y * padded_width as u32 + 4 * x) as usize;
155
156 self.upload_allocation(
157 &padded_data,
158 width,
159 height,
160 padding,
161 offset,
162 &fragment.allocation,
163 device,
164 encoder,
165 );
166 }
167 }
168 }
169
170 if log::log_enabled!(log::Level::Debug) {
171 log::debug!(
172 "Atlas layers: {} (busy: {}, allocations: {})",
173 self.layer_count(),
174 self.layers.iter().filter(|layer| !layer.is_empty()).count(),
175 self.layers.iter().map(Layer::allocations).sum::<usize>(),
176 );
177 }
178
179 Some(entry)
180 }
181
182 pub fn remove(&mut self, entry: &Entry) {
183 log::debug!("Removing atlas entry: {entry:?}");
184
185 match entry {
186 Entry::Contiguous(allocation) => {
187 self.deallocate(allocation);
188 }
189 Entry::Fragmented { fragments, .. } => {
190 for fragment in fragments {
191 self.deallocate(&fragment.allocation);
192 }
193 }
194 }
195 }
196
197 fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> {
198 if width == SIZE && height == SIZE {
200 let mut empty_layers = self
201 .layers
202 .iter_mut()
203 .enumerate()
204 .filter(|(_, layer)| layer.is_empty());
205
206 if let Some((i, layer)) = empty_layers.next() {
207 *layer = Layer::Full;
208
209 return Some(Entry::Contiguous(Allocation::Full { layer: i }));
210 }
211
212 self.layers.push(Layer::Full);
213
214 return Some(Entry::Contiguous(Allocation::Full {
215 layer: self.layers.len() - 1,
216 }));
217 }
218
219 if width > SIZE || height > SIZE {
221 let mut fragments = Vec::new();
222 let mut y = 0;
223
224 while y < height {
225 let height = std::cmp::min(height - y, SIZE);
226 let mut x = 0;
227
228 while x < width {
229 let width = std::cmp::min(width - x, SIZE);
230
231 let allocation = self.allocate(width, height)?;
232
233 if let Entry::Contiguous(allocation) = allocation {
234 fragments.push(entry::Fragment {
235 position: (x, y),
236 allocation,
237 });
238 }
239
240 x += width;
241 }
242
243 y += height;
244 }
245
246 return Some(Entry::Fragmented {
247 size: Size::new(width, height),
248 fragments,
249 });
250 }
251
252 for (i, layer) in self.layers.iter_mut().enumerate() {
254 match layer {
255 Layer::Empty => {
256 let mut allocator = Allocator::new(SIZE);
257
258 if let Some(region) = allocator.allocate(width, height) {
259 *layer = Layer::Busy(allocator);
260
261 return Some(Entry::Contiguous(Allocation::Partial {
262 region,
263 layer: i,
264 }));
265 }
266 }
267 Layer::Busy(allocator) => {
268 if let Some(region) = allocator.allocate(width, height) {
269 return Some(Entry::Contiguous(Allocation::Partial {
270 region,
271 layer: i,
272 }));
273 }
274 }
275 Layer::Full => {}
276 }
277 }
278
279 let mut allocator = Allocator::new(SIZE);
281
282 if let Some(region) = allocator.allocate(width, height) {
283 self.layers.push(Layer::Busy(allocator));
284
285 return Some(Entry::Contiguous(Allocation::Partial {
286 region,
287 layer: self.layers.len() - 1,
288 }));
289 }
290
291 None
293 }
294
295 fn deallocate(&mut self, allocation: &Allocation) {
296 log::debug!("Deallocating atlas: {allocation:?}");
297
298 match allocation {
299 Allocation::Full { layer } => {
300 self.layers[*layer] = Layer::Empty;
301 }
302 Allocation::Partial { layer, region } => {
303 let layer = &mut self.layers[*layer];
304
305 if let Layer::Busy(allocator) = layer {
306 allocator.deallocate(region);
307
308 if allocator.is_empty() {
309 *layer = Layer::Empty;
310 }
311 }
312 }
313 }
314 }
315
316 fn upload_allocation(
317 &mut self,
318 data: &[u8],
319 image_width: u32,
320 image_height: u32,
321 padding: u32,
322 offset: usize,
323 allocation: &Allocation,
324 device: &wgpu::Device,
325 encoder: &mut wgpu::CommandEncoder,
326 ) {
327 use wgpu::util::DeviceExt;
328
329 let (x, y) = allocation.position();
330 let Size { width, height } = allocation.size();
331 let layer = allocation.layer();
332
333 let extent = wgpu::Extent3d {
334 width,
335 height,
336 depth_or_array_layers: 1,
337 };
338
339 let buffer =
340 device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
341 label: Some("image upload buffer"),
342 contents: data,
343 usage: wgpu::BufferUsages::COPY_SRC,
344 });
345
346 encoder.copy_buffer_to_texture(
347 wgpu::TexelCopyBufferInfo {
348 buffer: &buffer,
349 layout: wgpu::TexelCopyBufferLayout {
350 offset: offset as u64,
351 bytes_per_row: Some(4 * image_width + padding),
352 rows_per_image: Some(image_height),
353 },
354 },
355 wgpu::TexelCopyTextureInfo {
356 texture: &self.texture,
357 mip_level: 0,
358 origin: wgpu::Origin3d {
359 x,
360 y,
361 z: layer as u32,
362 },
363 aspect: wgpu::TextureAspect::default(),
364 },
365 extent,
366 );
367 }
368
369 fn grow(
370 &mut self,
371 amount: usize,
372 device: &wgpu::Device,
373 encoder: &mut wgpu::CommandEncoder,
374 ) {
375 if amount == 0 {
376 return;
377 }
378
379 let new_texture = device.create_texture(&wgpu::TextureDescriptor {
380 label: Some("iced_wgpu::image texture atlas"),
381 size: wgpu::Extent3d {
382 width: SIZE,
383 height: SIZE,
384 depth_or_array_layers: self.layers.len() as u32,
385 },
386 mip_level_count: 1,
387 sample_count: 1,
388 dimension: wgpu::TextureDimension::D2,
389 format: if color::GAMMA_CORRECTION {
390 wgpu::TextureFormat::Rgba8UnormSrgb
391 } else {
392 wgpu::TextureFormat::Rgba8Unorm
393 },
394 usage: wgpu::TextureUsages::COPY_DST
395 | wgpu::TextureUsages::COPY_SRC
396 | wgpu::TextureUsages::TEXTURE_BINDING,
397 view_formats: &[],
398 });
399
400 let amount_to_copy = self.layers.len() - amount;
401
402 for (i, layer) in
403 self.layers.iter_mut().take(amount_to_copy).enumerate()
404 {
405 if layer.is_empty() {
406 continue;
407 }
408
409 encoder.copy_texture_to_texture(
410 wgpu::TexelCopyTextureInfo {
411 texture: &self.texture,
412 mip_level: 0,
413 origin: wgpu::Origin3d {
414 x: 0,
415 y: 0,
416 z: i as u32,
417 },
418 aspect: wgpu::TextureAspect::default(),
419 },
420 wgpu::TexelCopyTextureInfo {
421 texture: &new_texture,
422 mip_level: 0,
423 origin: wgpu::Origin3d {
424 x: 0,
425 y: 0,
426 z: i as u32,
427 },
428 aspect: wgpu::TextureAspect::default(),
429 },
430 wgpu::Extent3d {
431 width: SIZE,
432 height: SIZE,
433 depth_or_array_layers: 1,
434 },
435 );
436 }
437
438 self.texture = new_texture;
439 self.texture_view =
440 self.texture.create_view(&wgpu::TextureViewDescriptor {
441 dimension: Some(wgpu::TextureViewDimension::D2Array),
442 ..Default::default()
443 });
444
445 self.texture_bind_group =
446 device.create_bind_group(&wgpu::BindGroupDescriptor {
447 label: Some("iced_wgpu::image texture atlas bind group"),
448 layout: &self.texture_layout,
449 entries: &[wgpu::BindGroupEntry {
450 binding: 0,
451 resource: wgpu::BindingResource::TextureView(
452 &self.texture_view,
453 ),
454 }],
455 });
456 }
457}