VirtualBox

source: vbox/trunk/src/libs/liblzma-5.4.1/common/stream_decoder_mt.c

Last change on this file was 98737, checked in by vboxsync, 21 months ago

libs/liblzma-5.4.1: Windows build fixes, bugref:10254

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 63.1 KB
Line 
1///////////////////////////////////////////////////////////////////////////////
2//
3/// \file stream_decoder_mt.c
4/// \brief Multithreaded .xz Stream decoder
5//
6// Authors: Sebastian Andrzej Siewior
7// Lasse Collin
8//
9// This file has been put into the public domain.
10// You can do whatever you want with this file.
11//
12///////////////////////////////////////////////////////////////////////////////
13
14#include "common.h"
15#include "block_decoder.h"
16#include "stream_decoder.h"
17#include "index.h"
18#include "outqueue.h"
19
20
/// States of a worker thread. The state field is protected by the
/// worker thread's own mutex (worker_thread.mutex); the comments below
/// document which side is allowed to perform each transition.
typedef enum {
        /// Waiting for work.
        /// Main thread may change this to THR_RUN or THR_EXIT.
        THR_IDLE,

        /// Decoding is in progress.
        /// Main thread may change this to THR_STOP or THR_EXIT.
        /// The worker thread may change this to THR_IDLE.
        THR_RUN,

        /// The main thread wants the thread to stop whatever it was doing
        /// but not exit. Main thread may change this to THR_EXIT.
        /// The worker thread may change this to THR_IDLE.
        THR_STOP,

        /// The main thread wants the thread to exit.
        THR_EXIT,

} worker_state;
40
41
/// Mode of partial updates: controls whether a worker thread publishes
/// its progress (outbuf->pos / decoder_in_pos) to the main thread while
/// a Block is still being decoded.
typedef enum {
        /// Partial updates (storing of worker thread progress
        /// to lzma_outbuf) are disabled.
        PARTIAL_DISABLED,

        /// Main thread requests partial updates to be enabled but
        /// no partial update has been done by the worker thread yet.
        ///
        /// Changing from PARTIAL_DISABLED to PARTIAL_START requires
        /// use of the worker-thread mutex. Other transitions don't
        /// need a mutex.
        PARTIAL_START,

        /// Partial updates are enabled and the worker thread has done
        /// at least one partial update.
        PARTIAL_ENABLED,

} partial_update_mode;
60
61
/// Data specific to one worker thread. These are allocated as an array
/// (see struct lzma_stream_coder.threads); idle entries are linked into
/// the threads_free stack via the "next" member.
struct worker_thread {
        /// Worker state is protected with our mutex.
        worker_state state;

        /// Input buffer that will contain the whole Block except Block Header.
        uint8_t *in;

        /// Amount of memory allocated for "in"
        size_t in_size;

        /// Number of bytes written to "in" by the main thread
        size_t in_filled;

        /// Number of bytes consumed from "in" by the worker thread.
        size_t in_pos;

        /// Amount of uncompressed data that has been decoded. This local
        /// copy is needed because updating outbuf->pos requires locking
        /// the main mutex (coder->mutex).
        size_t out_pos;

        /// Pointer to the main structure is needed to (1) lock the main
        /// mutex (coder->mutex) when updating outbuf->pos and (2) when
        /// putting this thread back to the stack of free threads.
        struct lzma_stream_coder *coder;

        /// The allocator is set by the main thread. Since a copy of the
        /// pointer is kept here, the application must not change the
        /// allocator before calling lzma_end().
        const lzma_allocator *allocator;

        /// Output queue buffer to which the uncompressed data is written.
        lzma_outbuf *outbuf;

        /// Amount of compressed data that has already been decompressed.
        /// This is updated from in_pos when our mutex is locked.
        /// This is size_t, not uint64_t, because per-thread progress
        /// is limited to sizes of allocated buffers.
        size_t progress_in;

        /// Like progress_in but for uncompressed data.
        size_t progress_out;

        /// Updating outbuf->pos requires locking the main mutex
        /// (coder->mutex). Since the main thread will only read output
        /// from the oldest outbuf in the queue, only the worker thread
        /// that is associated with the oldest outbuf needs to update its
        /// outbuf->pos. This avoids useless mutex contention that would
        /// happen if all worker threads were frequently locking the main
        /// mutex to update their outbuf->pos.
        ///
        /// Only when partial_update is something else than PARTIAL_DISABLED,
        /// this worker thread will update outbuf->pos after each call to
        /// the Block decoder.
        partial_update_mode partial_update;

        /// Block decoder
        lzma_next_coder block_decoder;

        /// Thread-specific Block options are needed because the Block
        /// decoder modifies the struct given to it at initialization.
        lzma_block block_options;

        /// Filter chain memory usage
        uint64_t mem_filters;

        /// Next structure in the stack of free worker threads.
        struct worker_thread *next;

        /// Mutex protecting "state" and the other fields shared between
        /// the main thread and this worker thread (see the per-field
        /// comments above).
        mythread_mutex mutex;

        /// Condition variable used with "mutex" to wake this thread up
        /// when there is new input or a state change.
        mythread_cond cond;

        /// The ID of this thread is used to join the thread
        /// when it's not needed anymore.
        mythread thread_id;
};
138
139
/// Main structure of the multithreaded .xz Stream decoder.
struct lzma_stream_coder {
        /// Current step of the decoding state machine in
        /// stream_decode_mt().
        enum {
                SEQ_STREAM_HEADER,
                SEQ_BLOCK_HEADER,
                SEQ_BLOCK_INIT,
                SEQ_BLOCK_THR_INIT,
                SEQ_BLOCK_THR_RUN,
                SEQ_BLOCK_DIRECT_INIT,
                SEQ_BLOCK_DIRECT_RUN,
                SEQ_INDEX_WAIT_OUTPUT,
                SEQ_INDEX_DECODE,
                SEQ_STREAM_FOOTER,
                SEQ_STREAM_PADDING,
                SEQ_ERROR,
        } sequence;

        /// Block decoder
        lzma_next_coder block_decoder;

        /// Every Block Header will be decoded into this structure.
        /// This is also used to initialize a Block decoder when in
        /// direct mode. In threaded mode, a thread-specific copy will
        /// be made for decoder initialization because the Block decoder
        /// will modify the structure given to it.
        lzma_block block_options;

        /// Buffer to hold a filter chain for Block Header decoding and
        /// initialization. These are freed after successful Block decoder
        /// initialization or at stream_decoder_mt_end(). The thread-specific
        /// copy of block_options won't hold a pointer to filters[] after
        /// initialization.
        lzma_filter filters[LZMA_FILTERS_MAX + 1];

        /// Stream Flags from Stream Header
        lzma_stream_flags stream_flags;

        /// Index is hashed so that it can be compared to the sizes of Blocks
        /// with O(1) memory usage.
        lzma_index_hash *index_hash;


        /// Maximum wait time if cannot use all the input and cannot
        /// fill the output buffer. This is in milliseconds.
        uint32_t timeout;


        /// Error code from a worker thread.
        ///
        /// \note Use mutex.
        lzma_ret thread_error;

        /// Error code to return after pending output has been copied out. If
        /// set in read_output_and_wait(), this is a mirror of thread_error.
        /// If set in stream_decode_mt() then it's, for example, error that
        /// occurred when decoding Block Header.
        lzma_ret pending_error;

        /// Number of threads that will be created at maximum.
        uint32_t threads_max;

        /// Number of thread structures that have been initialized from
        /// "threads", and thus the number of worker threads actually
        /// created so far.
        uint32_t threads_initialized;

        /// Array of allocated thread-specific structures. When no threads
        /// are in use (direct mode) this is NULL. In threaded mode this
        /// points to an array of threads_max number of worker_thread structs.
        struct worker_thread *threads;

        /// Stack of free threads. When a thread finishes, it puts itself
        /// back into this stack. This starts as empty because threads
        /// are created only when actually needed.
        ///
        /// \note Use mutex.
        struct worker_thread *threads_free;

        /// The most recent worker thread to which the main thread writes
        /// the new input from the application.
        struct worker_thread *thr;

        /// Output buffer queue for decompressed data from the worker threads
        ///
        /// \note Use mutex with operations that need it.
        lzma_outq outq;

        /// Mutex protecting the fields marked with "\note Use mutex".
        mythread_mutex mutex;

        /// Condition variable used with "mutex"; worker threads signal it
        /// when new output becomes available or when a thread frees itself
        /// (see worker_stop() and the partial-update code).
        mythread_cond cond;


        /// Memory usage that will not be exceeded in multi-threaded mode.
        /// Single-threaded mode can exceed this even by a large amount.
        uint64_t memlimit_threading;

        /// Memory usage limit that should never be exceeded.
        /// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
        /// even in single-threaded mode without exceeding this limit.
        uint64_t memlimit_stop;

        /// Amount of memory in use by the direct mode decoder
        /// (coder->block_decoder). In threaded mode this is 0.
        uint64_t mem_direct_mode;

        /// Amount of memory needed by the running worker threads.
        /// This doesn't include the memory needed by the output buffer.
        ///
        /// \note Use mutex.
        uint64_t mem_in_use;

        /// Amount of memory used by the idle (cached) threads.
        ///
        /// \note Use mutex.
        uint64_t mem_cached;


        /// Amount of memory needed for the filter chain of the next Block.
        uint64_t mem_next_filters;

        /// Amount of memory needed for the thread-specific input buffer
        /// for the next Block.
        uint64_t mem_next_in;

        /// Amount of memory actually needed to decode the next Block
        /// in threaded mode. This is
        /// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
        uint64_t mem_next_block;


        /// Amount of compressed data in Stream Header + Blocks that have
        /// already been finished.
        ///
        /// \note Use mutex.
        uint64_t progress_in;

        /// Amount of uncompressed data in Blocks that have already
        /// been finished.
        ///
        /// \note Use mutex.
        uint64_t progress_out;


        /// If true, LZMA_NO_CHECK is returned if the Stream has
        /// no integrity check.
        bool tell_no_check;

        /// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
        /// an integrity check that isn't supported by this liblzma build.
        bool tell_unsupported_check;

        /// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
        bool tell_any_check;

        /// If true, we will tell the Block decoder to skip calculating
        /// and verifying the integrity check.
        bool ignore_check;

        /// If true, we will decode concatenated Streams that possibly have
        /// Stream Padding between or after them. LZMA_STREAM_END is returned
        /// once the application isn't giving us any new input (LZMA_FINISH),
        /// and we aren't in the middle of a Stream, and possible
        /// Stream Padding is a multiple of four bytes.
        bool concatenated;

        /// If true, we will return any errors immediately instead of first
        /// producing all output before the location of the error.
        bool fail_fast;


        /// When decoding concatenated Streams, this is true as long as we
        /// are decoding the first Stream. This is needed to avoid misleading
        /// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
        /// bytes.
        bool first_stream;

        /// This is used to track if the previous call to stream_decode_mt()
        /// had output space (*out_pos < out_size) and managed to fill the
        /// output buffer (*out_pos == out_size). This may be set to true
        /// in read_output_and_wait(). This is read and then reset to false
        /// at the beginning of stream_decode_mt().
        ///
        /// This is needed to support applications that call lzma_code() in
        /// such a way that more input is provided only when lzma_code()
        /// didn't fill the output buffer completely. Basically, this makes
        /// it easier to convert such applications from single-threaded
        /// decoder to multi-threaded decoder.
        bool out_was_filled;

        /// Write position in buffer[] and position in Stream Padding
        size_t pos;

        /// Buffer to hold Stream Header, Block Header, and Stream Footer.
        /// Block Header has biggest maximum size.
        uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
};
334
335
336/// Enables updating of outbuf->pos. This is a callback function that is
337/// used with lzma_outq_enable_partial_output().
338static void
339worker_enable_partial_update(void *thr_ptr)
340{
341 struct worker_thread *thr = thr_ptr;
342
343 mythread_sync(thr->mutex) {
344 thr->partial_update = PARTIAL_START;
345 mythread_cond_signal(&thr->cond);
346 }
347}
348
349
350/// Things do to at THR_STOP or when finishing a Block.
351/// This is called with thr->mutex locked.
352static void
353worker_stop(struct worker_thread *thr)
354{
355 // Update memory usage counters.
356 thr->coder->mem_in_use -= thr->in_size;
357 thr->in_size = 0; // thr->in was freed above.
358
359 thr->coder->mem_in_use -= thr->mem_filters;
360 thr->coder->mem_cached += thr->mem_filters;
361
362 // Put this thread to the stack of free threads.
363 thr->next = thr->coder->threads_free;
364 thr->coder->threads_free = thr;
365
366 mythread_cond_signal(&thr->coder->cond);
367 return;
368}
369
370
/// Main function of a worker thread.
///
/// The thread sleeps while in THR_IDLE, decodes its Block while in THR_RUN,
/// and reacts to THR_STOP and THR_EXIT requests from the main thread. See
/// the worker_state enum for the allowed state transitions.
static MYTHREAD_RET_TYPE
#ifndef VBOX
worker_decoder(void *thr_ptr)
#else
// VBox: IPRT's thread API passes the thread handle as the first argument.
worker_decoder(RTTHREAD hThread, void *thr_ptr)
#endif
{
        struct worker_thread *thr = thr_ptr;
        size_t in_filled;
        partial_update_mode partial_update;
        lzma_ret ret;

next_loop_lock:

        mythread_mutex_lock(&thr->mutex);
next_loop_unlocked:

        if (thr->state == THR_IDLE) {
                mythread_cond_wait(&thr->cond, &thr->mutex);
                goto next_loop_unlocked;
        }

        if (thr->state == THR_EXIT) {
                // Free this thread's resources and terminate.
                mythread_mutex_unlock(&thr->mutex);

                lzma_free(thr->in, thr->allocator);
                lzma_next_end(&thr->block_decoder, thr->allocator);

                mythread_mutex_destroy(&thr->mutex);
                mythread_cond_destroy(&thr->cond);

                return MYTHREAD_RET_VALUE;
        }

        if (thr->state == THR_STOP) {
                // Stop decoding but keep the thread alive and go back
                // to waiting for work.
                thr->state = THR_IDLE;
                mythread_mutex_unlock(&thr->mutex);

                mythread_sync(thr->coder->mutex) {
                        worker_stop(thr);
                }

                goto next_loop_lock;
        }

        assert(thr->state == THR_RUN);

        // Update progress info for get_progress().
        thr->progress_in = thr->in_pos;
        thr->progress_out = thr->out_pos;

        // If we don't have any new input, wait for a signal from the main
        // thread except if partial output has just been enabled. In that
        // case we will do one normal run so that the partial output info
        // gets passed to the main thread. The call to block_decoder.code()
        // is useless but harmless as it can occur only once per Block.
        in_filled = thr->in_filled;
        partial_update = thr->partial_update;

        if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
                mythread_cond_wait(&thr->cond, &thr->mutex);
                goto next_loop_unlocked;
        }

        mythread_mutex_unlock(&thr->mutex);

        // Pass the input in small chunks to the Block decoder.
        // This way we react reasonably fast if we are told to stop/exit,
        // and (when partial update is enabled) we tell about our progress
        // to the main thread frequently enough.
        const size_t chunk_size = 16384;
        if ((in_filled - thr->in_pos) > chunk_size)
                in_filled = thr->in_pos + chunk_size;

        ret = thr->block_decoder.code(
                        thr->block_decoder.coder, thr->allocator,
                        thr->in, &thr->in_pos, in_filled,
                        thr->outbuf->buf, &thr->out_pos,
                        thr->outbuf->allocated, LZMA_RUN);

        if (ret == LZMA_OK) {
                if (partial_update != PARTIAL_DISABLED) {
                        // The main thread uses thr->mutex to change from
                        // PARTIAL_DISABLED to PARTIAL_START. The main thread
                        // doesn't care about this variable after that so we
                        // can safely change it here to PARTIAL_ENABLED
                        // without a mutex.
                        thr->partial_update = PARTIAL_ENABLED;

                        // The main thread is reading decompressed data
                        // from thr->outbuf. Tell the main thread about
                        // our progress.
                        //
                        // NOTE: It's possible that we consumed input without
                        // producing any new output so it's possible that
                        // only in_pos has changed. In case of PARTIAL_START
                        // it is possible that neither in_pos nor out_pos has
                        // changed.
                        mythread_sync(thr->coder->mutex) {
                                thr->outbuf->pos = thr->out_pos;
                                thr->outbuf->decoder_in_pos = thr->in_pos;
                                mythread_cond_signal(&thr->coder->cond);
                        }
                }

                goto next_loop_lock;
        }

        // Either we finished successfully (LZMA_STREAM_END) or an error
        // occurred. Both cases are handled almost identically. The error
        // case requires updating thr->coder->thread_error.
        //
        // The sizes are in the Block Header and the Block decoder
        // checks that they match, thus we know these:
        assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
        assert(ret != LZMA_STREAM_END
                        || thr->out_pos == thr->block_options.uncompressed_size);

        // Free the input buffer. Don't update in_size as we need
        // it later to update thr->coder->mem_in_use.
        lzma_free(thr->in, thr->allocator);
        thr->in = NULL;

        // Mark the thread idle again unless the main thread has already
        // requested an exit while we were decoding.
        mythread_sync(thr->mutex) {
                if (thr->state != THR_EXIT)
                        thr->state = THR_IDLE;
        }

        mythread_sync(thr->coder->mutex) {
                // Move our progress info to the main thread.
                thr->coder->progress_in += thr->in_pos;
                thr->coder->progress_out += thr->out_pos;
                thr->progress_in = 0;
                thr->progress_out = 0;

                // Mark the outbuf as finished.
                thr->outbuf->pos = thr->out_pos;
                thr->outbuf->decoder_in_pos = thr->in_pos;
                thr->outbuf->finished = true;
                thr->outbuf->finish_ret = ret;
                thr->outbuf = NULL;

                // If an error occurred, tell it to the main thread.
                if (ret != LZMA_STREAM_END
                                && thr->coder->thread_error == LZMA_OK)
                        thr->coder->thread_error = ret;

                worker_stop(thr);
        }

        goto next_loop_lock;
}
523
524
525/// Tells the worker threads to exit and waits for them to terminate.
526static void
527threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
528{
529 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
530 mythread_sync(coder->threads[i].mutex) {
531 coder->threads[i].state = THR_EXIT;
532 mythread_cond_signal(&coder->threads[i].cond);
533 }
534 }
535
536 for (uint32_t i = 0; i < coder->threads_initialized; ++i)
537 mythread_join(coder->threads[i].thread_id);
538
539 lzma_free(coder->threads, allocator);
540 coder->threads_initialized = 0;
541 coder->threads = NULL;
542 coder->threads_free = NULL;
543
544 // The threads don't update these when they exit. Do it here.
545 coder->mem_in_use = 0;
546 coder->mem_cached = 0;
547
548 return;
549}
550
551
552static void
553threads_stop(struct lzma_stream_coder *coder)
554{
555 for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
556 mythread_sync(coder->threads[i].mutex) {
557 // The state must be changed conditionally because
558 // THR_IDLE -> THR_STOP is not a valid state change.
559 if (coder->threads[i].state != THR_IDLE) {
560 coder->threads[i].state = THR_STOP;
561 mythread_cond_signal(&coder->threads[i].cond);
562 }
563 }
564 }
565
566 return;
567}
568
569
570/// Initialize a new worker_thread structure and create a new thread.
571static lzma_ret
572initialize_new_thread(struct lzma_stream_coder *coder,
573 const lzma_allocator *allocator)
574{
575 // Allocate the coder->threads array if needed. It's done here instead
576 // of when initializing the decoder because we don't need this if we
577 // use the direct mode (we may even free coder->threads in the middle
578 // of the file if we switch from threaded to direct mode).
579 if (coder->threads == NULL) {
580 coder->threads = lzma_alloc(
581 coder->threads_max * sizeof(struct worker_thread),
582 allocator);
583
584 if (coder->threads == NULL)
585 return LZMA_MEM_ERROR;
586 }
587
588 // Pick a free structure.
589 assert(coder->threads_initialized < coder->threads_max);
590 struct worker_thread *thr
591 = &coder->threads[coder->threads_initialized];
592
593 if (mythread_mutex_init(&thr->mutex))
594 goto error_mutex;
595
596 if (mythread_cond_init(&thr->cond))
597 goto error_cond;
598
599 thr->state = THR_IDLE;
600 thr->in = NULL;
601 thr->in_size = 0;
602 thr->allocator = allocator;
603 thr->coder = coder;
604 thr->outbuf = NULL;
605 thr->block_decoder = LZMA_NEXT_CODER_INIT;
606 thr->mem_filters = 0;
607
608 if (mythread_create(&thr->thread_id, worker_decoder, thr))
609 goto error_thread;
610
611 ++coder->threads_initialized;
612 coder->thr = thr;
613
614 return LZMA_OK;
615
616error_thread:
617 mythread_cond_destroy(&thr->cond);
618
619error_cond:
620 mythread_mutex_destroy(&thr->mutex);
621
622error_mutex:
623 return LZMA_MEM_ERROR;
624}
625
626
627static lzma_ret
628get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
629{
630 // If there is a free structure on the stack, use it.
631 mythread_sync(coder->mutex) {
632 if (coder->threads_free != NULL) {
633 coder->thr = coder->threads_free;
634 coder->threads_free = coder->threads_free->next;
635
636 // The thread is no longer in the cache so substract
637 // it from the cached memory usage. Don't add it
638 // to mem_in_use though; the caller will handle it
639 // since it knows how much memory it will actually
640 // use (the filter chain might change).
641 coder->mem_cached -= coder->thr->mem_filters;
642 }
643 }
644
645 if (coder->thr == NULL) {
646 assert(coder->threads_initialized < coder->threads_max);
647
648 // Initialize a new thread.
649 return_if_error(initialize_new_thread(coder, allocator));
650 }
651
652 coder->thr->in_filled = 0;
653 coder->thr->in_pos = 0;
654 coder->thr->out_pos = 0;
655
656 coder->thr->progress_in = 0;
657 coder->thr->progress_out = 0;
658
659 coder->thr->partial_update = PARTIAL_DISABLED;
660
661 return LZMA_OK;
662}
663
664
/// Copies finished output from the output queue into the application's
/// output buffer and, when allowed, waits for the worker threads to make
/// progress.
///
/// \param out, out_pos, out_size
///             The application-provided output buffer.
/// \param input_is_possible
///             If non-NULL, this is set to true (and the function returns)
///             once decoding of the next Block could be started: enough
///             memory within memlimit_threading, a free outbuf slot, and
///             a thread that can be created or reused.
/// \param waiting_allowed
///             If false, the function never blocks on coder->cond.
/// \param wait_abs, has_blocked
///             State used to implement coder->timeout with
///             mythread_cond_timedwait().
///
/// \return LZMA_OK, LZMA_TIMED_OUT, or an error code from the output
///             queue / worker threads. On errors other than LZMA_TIMED_OUT
///             the worker threads are told to stop.
static lzma_ret
read_output_and_wait(struct lzma_stream_coder *coder,
                const lzma_allocator *allocator,
                uint8_t *restrict out, size_t *restrict out_pos,
                size_t out_size,
                bool *input_is_possible,
                bool waiting_allowed,
                mythread_condtime *wait_abs, bool *has_blocked)
{
        lzma_ret ret = LZMA_OK;

        mythread_sync(coder->mutex) {
                do {
                        // Get as much output from the queue as is possible
                        // without blocking.
                        const size_t out_start = *out_pos;
                        do {
                                ret = lzma_outq_read(&coder->outq, allocator,
                                                out, out_pos, out_size,
                                                NULL, NULL);

                                // If a Block was finished, tell the worker
                                // thread of the next Block (if it is still
                                // running) to start telling the main thread
                                // when new output is available.
                                if (ret == LZMA_STREAM_END)
                                        lzma_outq_enable_partial_output(
                                                &coder->outq,
                                                &worker_enable_partial_update);

                                // Loop until a Block wasn't finished.
                                // It's important to loop around even if
                                // *out_pos == out_size because there could
                                // be an empty Block that will return
                                // LZMA_STREAM_END without needing any
                                // output space.
                        } while (ret == LZMA_STREAM_END);

                        // Check if lzma_outq_read reported an error from
                        // the Block decoder.
                        if (ret != LZMA_OK)
                                break;

                        // If the output buffer is now full but it wasn't full
                        // when this function was called, set out_was_filled.
                        // This way the next call to stream_decode_mt() knows
                        // that some output was produced and no output space
                        // remained in the previous call to stream_decode_mt().
                        if (*out_pos == out_size && *out_pos != out_start)
                                coder->out_was_filled = true;

                        // Check if any thread has indicated an error.
                        if (coder->thread_error != LZMA_OK) {
                                // If LZMA_FAIL_FAST was used, report errors
                                // from worker threads immediately.
                                if (coder->fail_fast) {
                                        ret = coder->thread_error;
                                        break;
                                }

                                // Otherwise set pending_error. The value we
                                // set here will not actually get used other
                                // than working as a flag that an error has
                                // occurred. This is because in SEQ_ERROR
                                // all output before the error will be read
                                // first by calling this function, and once we
                                // reach the location of the (first) error the
                                // error code from the above lzma_outq_read()
                                // will be returned to the application.
                                //
                                // Use LZMA_PROG_ERROR since the value should
                                // never leak to the application. It's
                                // possible that pending_error has already
                                // been set but that doesn't matter: if we get
                                // here, pending_error only works as a flag.
                                coder->pending_error = LZMA_PROG_ERROR;
                        }

                        // Check if decoding of the next Block can be started.
                        // The memusage of the active threads must be low
                        // enough, there must be a free buffer slot in the
                        // output queue, and there must be a free thread
                        // (that can be either created or an existing one
                        // reused).
                        //
                        // NOTE: This is checked after reading the output
                        // above because reading the output can free a slot in
                        // the output queue and also reduce active memusage.
                        //
                        // NOTE: If output queue is empty, then input will
                        // always be possible.
                        if (input_is_possible != NULL
                                        && coder->memlimit_threading
                                                - coder->mem_in_use
                                                - coder->outq.mem_in_use
                                                >= coder->mem_next_block
                                        && lzma_outq_has_buf(&coder->outq)
                                        && (coder->threads_initialized
                                                        < coder->threads_max
                                                || coder->threads_free
                                                        != NULL)) {
                                *input_is_possible = true;
                                break;
                        }

                        // If the caller doesn't want us to block, return now.
                        if (!waiting_allowed)
                                break;

                        // This check is needed only when input_is_possible
                        // is NULL. We must return if we aren't waiting for
                        // input to become possible and there is no more
                        // output coming from the queue.
                        if (lzma_outq_is_empty(&coder->outq)) {
                                assert(input_is_possible == NULL);
                                break;
                        }

                        // If there is more data available from the queue,
                        // our out buffer must be full and we need to return
                        // so that the application can provide more output
                        // space.
                        //
                        // NOTE: In general lzma_outq_is_readable() can return
                        // true also when there are no more bytes available.
                        // This can happen when a Block has finished without
                        // providing any new output. We know that this is not
                        // the case because in the beginning of this loop we
                        // tried to read as much as possible even when we had
                        // no output space left and the mutex has been locked
                        // all the time (so worker threads cannot have changed
                        // anything). Thus there must be actual pending output
                        // in the queue.
                        if (lzma_outq_is_readable(&coder->outq)) {
                                assert(*out_pos == out_size);
                                break;
                        }

                        // If the application stops providing more input
                        // in the middle of a Block, there will eventually
                        // be one worker thread left that is stuck waiting for
                        // more input (that might never arrive) and a matching
                        // outbuf which the worker thread cannot finish due
                        // to lack of input. We must detect this situation,
                        // otherwise we would end up waiting indefinitely
                        // (if no timeout is in use) or keep returning
                        // LZMA_TIMED_OUT while making no progress. Thus, the
                        // application would never get LZMA_BUF_ERROR from
                        // lzma_code() which would tell the application that
                        // no more progress is possible. No LZMA_BUF_ERROR
                        // means that, for example, truncated .xz files could
                        // cause an infinite loop.
                        //
                        // A worker thread doing partial updates will
                        // store not only the output position in outbuf->pos
                        // but also the matching input position in
                        // outbuf->decoder_in_pos. Here we check if that
                        // input position matches the amount of input that
                        // the worker thread has been given (in_filled).
                        // If so, we must return and not wait as no more
                        // output will be coming without first getting more
                        // input to the worker thread. If the application
                        // keeps calling lzma_code() without providing more
                        // input, it will eventually get LZMA_BUF_ERROR.
                        //
                        // NOTE: We can read partial_update and in_filled
                        // without thr->mutex as only the main thread
                        // modifies these variables. decoder_in_pos requires
                        // coder->mutex which we are already holding.
                        if (coder->thr != NULL && coder->thr->partial_update
                                        != PARTIAL_DISABLED) {
                                // There is exactly one outbuf in the queue.
                                assert(coder->thr->outbuf == coder->outq.head);
                                assert(coder->thr->outbuf == coder->outq.tail);

                                if (coder->thr->outbuf->decoder_in_pos
                                                == coder->thr->in_filled)
                                        break;
                        }

                        // Wait for input or output to become possible.
                        if (coder->timeout != 0) {
                                // See the comment in stream_encoder_mt.c
                                // about why mythread_condtime_set() is used
                                // like this.
                                //
                                // FIXME?
                                // In contrast to the encoder, this calls
                                // _condtime_set while the mutex is locked.
                                if (!*has_blocked) {
                                        *has_blocked = true;
                                        mythread_condtime_set(wait_abs,
                                                        &coder->cond,
                                                        coder->timeout);
                                }

                                if (mythread_cond_timedwait(&coder->cond,
                                                &coder->mutex,
                                                wait_abs) != 0) {
                                        ret = LZMA_TIMED_OUT;
                                        break;
                                }
                        } else {
                                mythread_cond_wait(&coder->cond,
                                                &coder->mutex);
                        }
                } while (ret == LZMA_OK);
        }

        // If we are returning an error, then the application cannot get
        // more output from us and thus keeping the threads running is
        // useless and waste of CPU time.
        if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
                threads_stop(coder);

        return ret;
}
882
883
884static lzma_ret
885decode_block_header(struct lzma_stream_coder *coder,
886 const lzma_allocator *allocator, const uint8_t *restrict in,
887 size_t *restrict in_pos, size_t in_size)
888{
889 if (*in_pos >= in_size)
890 return LZMA_OK;
891
892 if (coder->pos == 0) {
893 // Detect if it's Index.
894 if (in[*in_pos] == INDEX_INDICATOR)
895 return LZMA_INDEX_DETECTED;
896
897 // Calculate the size of the Block Header. Note that
898 // Block Header decoder wants to see this byte too
899 // so don't advance *in_pos.
900 coder->block_options.header_size
901 = lzma_block_header_size_decode(
902 in[*in_pos]);
903 }
904
905 // Copy the Block Header to the internal buffer.
906 lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
907 coder->block_options.header_size);
908
909 // Return if we didn't get the whole Block Header yet.
910 if (coder->pos < coder->block_options.header_size)
911 return LZMA_OK;
912
913 coder->pos = 0;
914
915 // Version 1 is needed to support the .ignore_check option.
916 coder->block_options.version = 1;
917
918 // Block Header decoder will initialize all members of this array
919 // so we don't need to do it here.
920 coder->block_options.filters = coder->filters;
921
922 // Decode the Block Header.
923 return_if_error(lzma_block_header_decode(&coder->block_options,
924 allocator, coder->buffer));
925
926 // If LZMA_IGNORE_CHECK was used, this flag needs to be set.
927 // It has to be set after lzma_block_header_decode() because
928 // it always resets this to false.
929 coder->block_options.ignore_check = coder->ignore_check;
930
931 // coder->block_options is ready now.
932 return LZMA_STREAM_END;
933}
934
935
936/// Get the size of the Compressed Data + Block Padding + Check.
937static size_t
938comp_blk_size(const struct lzma_stream_coder *coder)
939{
940 return vli_ceil4(coder->block_options.compressed_size)
941 + lzma_check_size(coder->stream_flags.check);
942}
943
944
945/// Returns true if the size (compressed or uncompressed) is such that
946/// threaded decompression cannot be used. Sizes that are too big compared
947/// to SIZE_MAX must be rejected to avoid integer overflows and truncations
948/// when lzma_vli is assigned to a size_t.
949static bool
950is_direct_mode_needed(lzma_vli size)
951{
952 return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
953}
954
955
956static lzma_ret
957stream_decoder_reset(struct lzma_stream_coder *coder,
958 const lzma_allocator *allocator)
959{
960 // Initialize the Index hash used to verify the Index.
961 coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
962 if (coder->index_hash == NULL)
963 return LZMA_MEM_ERROR;
964
965 // Reset the rest of the variables.
966 coder->sequence = SEQ_STREAM_HEADER;
967 coder->pos = 0;
968
969 return LZMA_OK;
970}
971
972
/// \brief      Main decoding loop of the threaded .xz Stream decoder
///
/// This is a state machine driven by coder->sequence. The main thread
/// parses Stream Headers/Footers, Block Headers, the Index, and Stream
/// Padding itself; Block data is either handed to worker threads
/// (threaded mode) or decoded inline (single-threaded "direct" mode,
/// used when sizes are unknown/huge or memory limits forbid threading).
/// Decompressed output from workers is drained via read_output_and_wait().
static lzma_ret
stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
		const uint8_t *restrict in, size_t *restrict in_pos,
		size_t in_size,
		uint8_t *restrict out, size_t *restrict out_pos,
		size_t out_size, lzma_action action)
{
	struct lzma_stream_coder *coder = coder_ptr;

	mythread_condtime wait_abs;
	bool has_blocked = false;

	// Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
	// tell read_output_and_wait() to wait until it can fill the output
	// buffer (or a timeout occurs). Two conditions must be met:
	//
	// (1) If the caller provided no new input. The reason for this
	//     can be, for example, the end of the file or that there is
	//     a pause in the input stream and more input is available
	//     a little later. In this situation we should wait for output
	//     because otherwise we would end up in a busy-waiting loop where
	//     we make no progress and the application just calls us again
	//     without providing any new input. This would then result in
	//     LZMA_BUF_ERROR even though more output would be available
	//     once the worker threads decode more data.
	//
	// (2) Even if (1) is true, we will not wait if the previous call to
	//     this function managed to produce some output and the output
	//     buffer became full. This is for compatibility with applications
	//     that call lzma_code() in such a way that new input is provided
	//     only when the output buffer didn't become full. Without this
	//     trick such applications would have bad performance (bad
	//     parallelization due to decoder not getting input fast enough).
	//
	//     NOTE: Such loops might require that timeout is disabled (0)
	//     if they assume that output-not-full implies that all input has
	//     been consumed. If and only if timeout is enabled, we may return
	//     when output isn't full *and* not all input has been consumed.
	//
	// However, if LZMA_FINISH is used, the above is ignored and we always
	// wait (timeout can still cause us to return) because we know that
	// we won't get any more input. This matters if the input file is
	// truncated and we are doing single-shot decoding, that is,
	// timeout = 0 and LZMA_FINISH is used on the first call to
	// lzma_code() and the output buffer is known to be big enough
	// to hold all uncompressed data:
	//
	//   - If LZMA_FINISH wasn't handled specially, we could return
	//     LZMA_OK before providing all output that is possible with the
	//     truncated input. The rest would be available if lzma_code() was
	//     called again but then it's not single-shot decoding anymore.
	//
	//   - By handling LZMA_FINISH specially here, the first call will
	//     produce all the output, matching the behavior of the
	//     single-threaded decoder.
	//
	// So it's a very specific corner case but also easy to avoid. Note
	// that this special handling of LZMA_FINISH has no effect for
	// single-shot decoding when the input file is valid (not truncated);
	// premature LZMA_OK wouldn't be possible as long as timeout = 0.
	const bool waiting_allowed = action == LZMA_FINISH
			|| (*in_pos == in_size && !coder->out_was_filled);
	coder->out_was_filled = false;

	while (true)
	switch (coder->sequence) {
	case SEQ_STREAM_HEADER: {
		// Copy the Stream Header to the internal buffer.
		const size_t in_old = *in_pos;
		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
				LZMA_STREAM_HEADER_SIZE);
		coder->progress_in += *in_pos - in_old;

		// Return if we didn't get the whole Stream Header yet.
		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
			return LZMA_OK;

		coder->pos = 0;

		// Decode the Stream Header.
		const lzma_ret ret = lzma_stream_header_decode(
				&coder->stream_flags, coder->buffer);
		if (ret != LZMA_OK)
			return ret == LZMA_FORMAT_ERROR && !coder->first_stream
					? LZMA_DATA_ERROR : ret;

		// If we are decoding concatenated Streams, and the later
		// Streams have invalid Header Magic Bytes, we give
		// LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
		coder->first_stream = false;

		// Copy the type of the Check so that Block Header and Block
		// decoders see it.
		coder->block_options.check = coder->stream_flags.check;

		// Even if we return LZMA_*_CHECK below, we want
		// to continue from Block Header decoding.
		coder->sequence = SEQ_BLOCK_HEADER;

		// Detect if there's no integrity check or if it is
		// unsupported if those were requested by the application.
		if (coder->tell_no_check && coder->stream_flags.check
				== LZMA_CHECK_NONE)
			return LZMA_NO_CHECK;

		if (coder->tell_unsupported_check
				&& !lzma_check_is_supported(
					coder->stream_flags.check))
			return LZMA_UNSUPPORTED_CHECK;

		if (coder->tell_any_check)
			return LZMA_GET_CHECK;
	}

	// Fall through

	case SEQ_BLOCK_HEADER: {
		const size_t in_old = *in_pos;
		const lzma_ret ret = decode_block_header(coder, allocator,
				in, in_pos, in_size);
		coder->progress_in += *in_pos - in_old;

		if (ret == LZMA_OK) {
			// We didn't decode the whole Block Header yet.
			//
			// Read output from the queue before returning. This
			// is important because it is possible that the
			// application doesn't have any new input available
			// immediately. If we didn't try to copy output from
			// the output queue here, lzma_code() could end up
			// returning LZMA_BUF_ERROR even though queued output
			// is available.
			//
			// If the lzma_code() call provided at least one input
			// byte, only copy as much data from the output queue
			// as is available immediately. This way the
			// application will be able to provide more input
			// without a delay.
			//
			// On the other hand, if lzma_code() was called with
			// an empty input buffer(*), treat it specially: try
			// to fill the output buffer even if it requires
			// waiting for the worker threads to provide output
			// (timeout, if specified, can still cause us to
			// return).
			//
			//   - This way the application will be able to get all
			//     data that can be decoded from the input provided
			//     so far.
			//
			//   - We avoid both premature LZMA_BUF_ERROR and
			//     busy-waiting where the application repeatedly
			//     calls lzma_code() which immediately returns
			//     LZMA_OK without providing new data.
			//
			//   - If the queue becomes empty, we won't wait
			//     anything and will return LZMA_OK immediately
			//     (coder->timeout is completely ignored).
			//
			// (*) See the comment at the beginning of this
			//     function how waiting_allowed is determined
			//     and why there is an exception to the rule
			//     of "called with an empty input buffer".
			assert(*in_pos == in_size);

			// If LZMA_FINISH was used we know that we won't get
			// more input, so the file must be truncated if we
			// get here. If worker threads don't detect any
			// errors, eventually there will be no more output
			// while we keep returning LZMA_OK which gets
			// converted to LZMA_BUF_ERROR in lzma_code().
			//
			// If fail-fast is enabled then we will return
			// immediately using LZMA_DATA_ERROR instead of
			// LZMA_OK or LZMA_BUF_ERROR. Rationale for the
			// error code:
			//
			//   - Worker threads may have a large amount of
			//     not-yet-decoded input data and we don't
			//     know for sure if all data is valid. Bad
			//     data there would result in LZMA_DATA_ERROR
			//     when fail-fast isn't used.
			//
			//   - Immediate LZMA_BUF_ERROR would be a bit weird
			//     considering the older liblzma code. lzma_code()
			//     even has an assertion to prevent coders from
			//     returning LZMA_BUF_ERROR directly.
			//
			// The downside of this is that with fail-fast apps
			// cannot always distinguish between corrupt and
			// truncated files.
			if (action == LZMA_FINISH && coder->fail_fast) {
				// We won't produce any more output. Stop
				// the unfinished worker threads so they
				// won't waste CPU time.
				threads_stop(coder);
				return LZMA_DATA_ERROR;
			}

			// read_output_and_wait() will call threads_stop()
			// if needed so with that we can use return_if_error.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, waiting_allowed,
					&wait_abs, &has_blocked));

			if (coder->pending_error != LZMA_OK) {
				coder->sequence = SEQ_ERROR;
				break;
			}

			return LZMA_OK;
		}

		if (ret == LZMA_INDEX_DETECTED) {
			coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
			break;
		}

		// See if an error occurred.
		if (ret != LZMA_STREAM_END) {
			// NOTE: Here and in all other places where
			// pending_error is set, it may overwrite the value
			// (LZMA_PROG_ERROR) set by read_output_and_wait().
			// That function might overwrite value set here too.
			// These are fine because when read_output_and_wait()
			// sets pending_error, it actually works as a flag
			// variable only ("some error has occurred") and the
			// actual value of pending_error is not used in
			// SEQ_ERROR. In such cases SEQ_ERROR will eventually
			// get the correct error code from the return value of
			// a later read_output_and_wait() call.
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Calculate the memory usage of the filters / Block decoder.
		coder->mem_next_filters = lzma_raw_decoder_memusage(
				coder->filters);

		if (coder->mem_next_filters == UINT64_MAX) {
			// One or more unknown Filter IDs.
			coder->pending_error = LZMA_OPTIONS_ERROR;
			coder->sequence = SEQ_ERROR;
			break;
		}

		coder->sequence = SEQ_BLOCK_INIT;
	}

	// Fall through

	case SEQ_BLOCK_INIT: {
		// Check if decoding is possible at all with the current
		// memlimit_stop which we must never exceed.
		//
		// This needs to be the first thing in SEQ_BLOCK_INIT
		// to make it possible to restart decoding after increasing
		// memlimit_stop with lzma_memlimit_set().
		if (coder->mem_next_filters > coder->memlimit_stop) {
			// Flush pending output before returning
			// LZMA_MEMLIMIT_ERROR. If the application doesn't
			// want to increase the limit, at least it will get
			// all the output possible so far.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, true, &wait_abs, &has_blocked));

			if (!lzma_outq_is_empty(&coder->outq))
				return LZMA_OK;

			return LZMA_MEMLIMIT_ERROR;
		}

		// Check if the size information is available in Block Header.
		// If it is, check if the sizes are small enough that we don't
		// need to worry *too* much about integer overflows later in
		// the code. If these conditions are not met, we must use the
		// single-threaded direct mode.
		if (is_direct_mode_needed(coder->block_options.compressed_size)
				|| is_direct_mode_needed(
				coder->block_options.uncompressed_size)) {
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Calculate the amount of memory needed for the input and
		// output buffers in threaded mode.
		//
		// These cannot overflow because we already checked that
		// the sizes are small enough using is_direct_mode_needed().
		coder->mem_next_in = comp_blk_size(coder);
		const uint64_t mem_buffers = coder->mem_next_in
				+ lzma_outq_outbuf_memusage(
				coder->block_options.uncompressed_size);

		// Add the amount needed by the filters.
		// Avoid integer overflows.
		if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
			// Use direct mode if the memusage would overflow.
			// This is a theoretical case that shouldn't happen
			// in practice unless the input file is weird (broken
			// or malicious).
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Amount of memory needed to decode this Block in
		// threaded mode:
		coder->mem_next_block = coder->mem_next_filters + mem_buffers;

		// If this alone would exceed memlimit_threading, then we must
		// use the single-threaded direct mode.
		if (coder->mem_next_block > coder->memlimit_threading) {
			coder->sequence = SEQ_BLOCK_DIRECT_INIT;
			break;
		}

		// Use the threaded mode. Free the direct mode decoder in
		// case it has been initialized.
		lzma_next_end(&coder->block_decoder, allocator);
		coder->mem_direct_mode = 0;

		// Since we already know what the sizes are supposed to be,
		// we can already add them to the Index hash. The Block
		// decoder will verify the values while decoding.
		const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
				lzma_block_unpadded_size(
					&coder->block_options),
				coder->block_options.uncompressed_size);
		if (ret != LZMA_OK) {
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		coder->sequence = SEQ_BLOCK_THR_INIT;
	}

	// Fall through

	case SEQ_BLOCK_THR_INIT: {
		// We need to wait for multiple conditions to become true
		// until we can initialize the Block decoder and let a worker
		// thread decode it:
		//
		//   - Wait for the memory usage of the active threads to drop
		//     so that starting the decoding of this Block won't make
		//     us go over memlimit_threading.
		//
		//   - Wait for at least one free output queue slot.
		//
		//   - Wait for a free worker thread.
		//
		// While we wait, we must copy decompressed data to the out
		// buffer and catch possible decoder errors.
		//
		// read_output_and_wait() does all the above.
		bool block_can_start = false;

		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				&block_can_start, true,
				&wait_abs, &has_blocked));

		if (coder->pending_error != LZMA_OK) {
			coder->sequence = SEQ_ERROR;
			break;
		}

		if (!block_can_start) {
			// It's not a timeout because return_if_error handles
			// it already. Output queue cannot be empty either
			// because in that case block_can_start would have
			// been true. Thus the output buffer must be full and
			// the queue isn't empty.
			assert(*out_pos == out_size);
			assert(!lzma_outq_is_empty(&coder->outq));
			return LZMA_OK;
		}

		// We know that we can start decoding this Block without
		// exceeding memlimit_threading. However, to stay below
		// memlimit_threading may require freeing some of the
		// cached memory.
		//
		// Get a local copy of variables that require locking the
		// mutex. It is fine if the worker threads modify the real
		// values after we read these as those changes can only be
		// towards more favorable conditions (less memory in use,
		// more in cache).
#ifndef VBOX
		uint64_t mem_in_use;
		uint64_t mem_cached;
#else
		uint64_t mem_in_use = 0; /* Shut up msc who can't grok the mythread_sync construct below. */
		uint64_t mem_cached = 0;
#endif
		struct worker_thread *thr = NULL; // Init to silence warning.

		mythread_sync(coder->mutex) {
			mem_in_use = coder->mem_in_use;
			mem_cached = coder->mem_cached;
			thr = coder->threads_free;
		}

		// The maximum amount of memory that can be held by other
		// threads and cached buffers while allowing us to start
		// decoding the next Block.
		const uint64_t mem_max = coder->memlimit_threading
				- coder->mem_next_block;

		// If the existing allocations are so large that starting
		// to decode this Block might exceed memlimit_threading,
		// try to free memory from the output queue cache first.
		//
		// NOTE: This math assumes the worst case. It's possible
		// that the limit wouldn't be exceeded if the existing cached
		// allocations are reused.
		if (mem_in_use + mem_cached + coder->outq.mem_allocated
				> mem_max) {
			// Clear the outq cache except leave one buffer in
			// the cache if its size is correct. That way we
			// don't free and almost immediately reallocate
			// an identical buffer.
			lzma_outq_clear_cache2(&coder->outq, allocator,
				coder->block_options.uncompressed_size);
		}

		// If there is at least one worker_thread in the cache and
		// the existing allocations are so large that starting to
		// decode this Block might exceed memlimit_threading, free
		// memory by freeing cached Block decoders.
		//
		// NOTE: The comparison is different here than above.
		// Here we don't care about cached buffers in outq anymore
		// and only look at memory actually in use. This is because
		// if there is something in outq cache, it's a single buffer
		// that can be used as is. We ensured this in the above
		// if-block.
		uint64_t mem_freed = 0;
		if (thr != NULL && mem_in_use + mem_cached
				+ coder->outq.mem_in_use > mem_max) {
			// Don't free the first Block decoder if its memory
			// usage isn't greater than what this Block will need.
			// Typically the same filter chain is used for all
			// Blocks so this way the allocations can be reused
			// when get_thread() picks the first worker_thread
			// from the cache.
			if (thr->mem_filters <= coder->mem_next_filters)
				thr = thr->next;

			while (thr != NULL) {
				lzma_next_end(&thr->block_decoder, allocator);
				mem_freed += thr->mem_filters;
				thr->mem_filters = 0;
				thr = thr->next;
			}
		}

		// Update the memory usage counters. Note that coder->mem_*
		// may have changed since we read them so we must subtract
		// or add the changes.
		mythread_sync(coder->mutex) {
			coder->mem_cached -= mem_freed;

			// Memory needed for the filters and the input buffer.
			// The output queue takes care of its own counter so
			// we don't touch it here.
			//
			// NOTE: After this, coder->mem_in_use +
			// coder->mem_cached might count the same thing twice.
			// If so, this will get corrected in get_thread() when
			// a worker_thread is picked from coder->free_threads
			// and its memory usage is subtracted from mem_cached.
			coder->mem_in_use += coder->mem_next_in
					+ coder->mem_next_filters;
		}

		// Allocate memory for the output buffer in the output queue.
		lzma_ret ret = lzma_outq_prealloc_buf(
				&coder->outq, allocator,
				coder->block_options.uncompressed_size);
		if (ret != LZMA_OK) {
			threads_stop(coder);
			return ret;
		}

		// Set up coder->thr.
		ret = get_thread(coder, allocator);
		if (ret != LZMA_OK) {
			threads_stop(coder);
			return ret;
		}

		// The new Block decoder memory usage is already counted in
		// coder->mem_in_use. Store it in the thread too.
		coder->thr->mem_filters = coder->mem_next_filters;

		// Initialize the Block decoder.
		coder->thr->block_options = coder->block_options;
		ret = lzma_block_decoder_init(
				&coder->thr->block_decoder, allocator,
				&coder->thr->block_options);

		// Free the allocated filter options since they are needed
		// only to initialize the Block decoder.
		lzma_filters_free(coder->filters, allocator);
		coder->thr->block_options.filters = NULL;

		// Check if memory usage calculation and Block decoder
		// initialization succeeded.
		if (ret != LZMA_OK) {
			coder->pending_error = ret;
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Allocate the input buffer.
		coder->thr->in_size = coder->mem_next_in;
		coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
		if (coder->thr->in == NULL) {
			threads_stop(coder);
			return LZMA_MEM_ERROR;
		}

		// Get the preallocated output buffer.
		coder->thr->outbuf = lzma_outq_get_buf(
				&coder->outq, coder->thr);

		// Start the decoder.
		mythread_sync(coder->thr->mutex) {
			assert(coder->thr->state == THR_IDLE);
			coder->thr->state = THR_RUN;
			mythread_cond_signal(&coder->thr->cond);
		}

		// Enable output from the thread that holds the oldest output
		// buffer in the output queue (if such a thread exists).
		mythread_sync(coder->mutex) {
			lzma_outq_enable_partial_output(&coder->outq,
					&worker_enable_partial_update);
		}

		coder->sequence = SEQ_BLOCK_THR_RUN;
	}

	// Fall through

	case SEQ_BLOCK_THR_RUN: {
		if (action == LZMA_FINISH && coder->fail_fast) {
			// We know that we won't get more input and that
			// the caller wants fail-fast behavior. If we see
			// that we don't have enough input to finish this
			// Block, return LZMA_DATA_ERROR immediately.
			// See SEQ_BLOCK_HEADER for the error code rationale.
			const size_t in_avail = in_size - *in_pos;
			const size_t in_needed = coder->thr->in_size
					- coder->thr->in_filled;
			if (in_avail < in_needed) {
				threads_stop(coder);
				return LZMA_DATA_ERROR;
			}
		}

		// Copy input to the worker thread.
		size_t cur_in_filled = coder->thr->in_filled;
		lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
				&cur_in_filled, coder->thr->in_size);

		// Tell the thread how much we copied.
		mythread_sync(coder->thr->mutex) {
			coder->thr->in_filled = cur_in_filled;

			// NOTE: Most of the time we are copying input faster
			// than the thread can decode so most of the time
			// calling mythread_cond_signal() is useless but
			// we cannot make it conditional because thr->in_pos
			// is updated without a mutex. And the overhead should
			// be very much negligible anyway.
			mythread_cond_signal(&coder->thr->cond);
		}

		// Read output from the output queue. Just like in
		// SEQ_BLOCK_HEADER, we wait to fill the output buffer
		// only if waiting_allowed was set to true in the beginning
		// of this function (see the comment there).
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL, waiting_allowed,
				&wait_abs, &has_blocked));

		if (coder->pending_error != LZMA_OK) {
			coder->sequence = SEQ_ERROR;
			break;
		}

		// Return if the input didn't contain the whole Block.
		if (coder->thr->in_filled < coder->thr->in_size) {
			assert(*in_pos == in_size);
			return LZMA_OK;
		}

		// The whole Block has been copied to the thread-specific
		// buffer. Continue from the next Block Header or Index.
		coder->thr = NULL;
		coder->sequence = SEQ_BLOCK_HEADER;
		break;
	}

	case SEQ_BLOCK_DIRECT_INIT: {
		// Wait for the threads to finish and that all decoded data
		// has been copied to the output. That is, wait until the
		// output queue becomes empty.
		//
		// NOTE: No need to check for coder->pending_error as
		// we aren't consuming any input until the queue is empty
		// and if there is a pending error, read_output_and_wait()
		// will eventually return it before the queue is empty.
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL, true, &wait_abs, &has_blocked));
		if (!lzma_outq_is_empty(&coder->outq))
			return LZMA_OK;

		// Free the cached output buffers.
		lzma_outq_clear_cache(&coder->outq, allocator);

		// Get rid of the worker threads, including the coder->threads
		// array.
		threads_end(coder, allocator);

		// Initialize the Block decoder.
		const lzma_ret ret = lzma_block_decoder_init(
				&coder->block_decoder, allocator,
				&coder->block_options);

		// Free the allocated filter options since they are needed
		// only to initialize the Block decoder.
		lzma_filters_free(coder->filters, allocator);
		coder->block_options.filters = NULL;

		// Check if Block decoder initialization succeeded.
		if (ret != LZMA_OK)
			return ret;

		// Make the memory usage visible to _memconfig().
		coder->mem_direct_mode = coder->mem_next_filters;

		coder->sequence = SEQ_BLOCK_DIRECT_RUN;
	}

	// Fall through

	case SEQ_BLOCK_DIRECT_RUN: {
		const size_t in_old = *in_pos;
		const size_t out_old = *out_pos;
		const lzma_ret ret = coder->block_decoder.code(
				coder->block_decoder.coder, allocator,
				in, in_pos, in_size, out, out_pos, out_size,
				action);
		coder->progress_in += *in_pos - in_old;
		coder->progress_out += *out_pos - out_old;

		if (ret != LZMA_STREAM_END)
			return ret;

		// Block decoded successfully. Add the new size pair to
		// the Index hash.
		return_if_error(lzma_index_hash_append(coder->index_hash,
				lzma_block_unpadded_size(
					&coder->block_options),
				coder->block_options.uncompressed_size));

		coder->sequence = SEQ_BLOCK_HEADER;
		break;
	}

	case SEQ_INDEX_WAIT_OUTPUT:
		// Flush the output from all worker threads so that we can
		// decode the Index without thinking about threading.
		return_if_error(read_output_and_wait(coder, allocator,
				out, out_pos, out_size,
				NULL, true, &wait_abs, &has_blocked));

		if (!lzma_outq_is_empty(&coder->outq))
			return LZMA_OK;

		coder->sequence = SEQ_INDEX_DECODE;

	// Fall through

	case SEQ_INDEX_DECODE: {
		// If we don't have any input, don't call
		// lzma_index_hash_decode() since it would return
		// LZMA_BUF_ERROR, which we must not do here.
		if (*in_pos >= in_size)
			return LZMA_OK;

		// Decode the Index and compare it to the hash calculated
		// from the sizes of the Blocks (if any).
		const size_t in_old = *in_pos;
		const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
				in, in_pos, in_size);
		coder->progress_in += *in_pos - in_old;
		if (ret != LZMA_STREAM_END)
			return ret;

		coder->sequence = SEQ_STREAM_FOOTER;
	}

	// Fall through

	case SEQ_STREAM_FOOTER: {
		// Copy the Stream Footer to the internal buffer.
		const size_t in_old = *in_pos;
		lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
				LZMA_STREAM_HEADER_SIZE);
		coder->progress_in += *in_pos - in_old;

		// Return if we didn't get the whole Stream Footer yet.
		if (coder->pos < LZMA_STREAM_HEADER_SIZE)
			return LZMA_OK;

		coder->pos = 0;

		// Decode the Stream Footer. The decoder gives
		// LZMA_FORMAT_ERROR if the magic bytes don't match,
		// so convert that return code to LZMA_DATA_ERROR.
		lzma_stream_flags footer_flags;
		const lzma_ret ret = lzma_stream_footer_decode(
				&footer_flags, coder->buffer);
		if (ret != LZMA_OK)
			return ret == LZMA_FORMAT_ERROR
					? LZMA_DATA_ERROR : ret;

		// Check that Index Size stored in the Stream Footer matches
		// the real size of the Index field.
		if (lzma_index_hash_size(coder->index_hash)
				!= footer_flags.backward_size)
			return LZMA_DATA_ERROR;

		// Compare that the Stream Flags fields are identical in
		// both Stream Header and Stream Footer.
		return_if_error(lzma_stream_flags_compare(
				&coder->stream_flags, &footer_flags));

		if (!coder->concatenated)
			return LZMA_STREAM_END;

		coder->sequence = SEQ_STREAM_PADDING;
	}

	// Fall through

	case SEQ_STREAM_PADDING:
		assert(coder->concatenated);

		// Skip over possible Stream Padding.
		while (true) {
			if (*in_pos >= in_size) {
				// Unless LZMA_FINISH was used, we cannot
				// know if there's more input coming later.
				if (action != LZMA_FINISH)
					return LZMA_OK;

				// Stream Padding must be a multiple of
				// four bytes.
				return coder->pos == 0
						? LZMA_STREAM_END
						: LZMA_DATA_ERROR;
			}

			// If the byte is not zero, it probably indicates
			// beginning of a new Stream (or the file is corrupt).
			if (in[*in_pos] != 0x00)
				break;

			++*in_pos;
			++coder->progress_in;
			coder->pos = (coder->pos + 1) & 3;
		}

		// Stream Padding must be a multiple of four bytes (empty
		// Stream Padding is OK).
		if (coder->pos != 0) {
			++*in_pos;
			++coder->progress_in;
			return LZMA_DATA_ERROR;
		}

		// Prepare to decode the next Stream.
		return_if_error(stream_decoder_reset(coder, allocator));
		break;

	case SEQ_ERROR:
		if (!coder->fail_fast) {
			// Let the application get all data before the point
			// where the error was detected. This matches the
			// behavior of single-threaded use.
			//
			// FIXME? Some errors (LZMA_MEM_ERROR) don't get here,
			// they are returned immediately. Thus in rare cases
			// the output will be less than in the single-threaded
			// mode. Maybe this doesn't matter much in practice.
			return_if_error(read_output_and_wait(coder, allocator,
					out, out_pos, out_size,
					NULL, true, &wait_abs, &has_blocked));

			// We get here only if the error happened in the main
			// thread, for example, unsupported Block Header.
			if (!lzma_outq_is_empty(&coder->outq))
				return LZMA_OK;
		}

		// We only get here if no errors were detected by the worker
		// threads. Errors from worker threads would have already been
		// returned by the call to read_output_and_wait() above.
		return coder->pending_error;

	default:
		assert(0);
		return LZMA_PROG_ERROR;
	}

	// Never reached
}
1801
1802
/// Free all resources owned by the coder and the coder itself.
///
/// NOTE(review): The teardown order appears deliberate — worker threads
/// are ended before the output queue is freed (workers hold references
/// into the queue's buffers); keep this ordering.
static void
stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
{
	struct lzma_stream_coder *coder = coder_ptr;

	// Terminate and free the worker threads first, then the output
	// queue they were writing into.
	threads_end(coder, allocator);
	lzma_outq_end(&coder->outq, allocator);

	// Direct-mode Block decoder, filter options, and the Index hash.
	lzma_next_end(&coder->block_decoder, allocator);
	lzma_filters_free(coder->filters, allocator);
	lzma_index_hash_end(coder->index_hash, allocator);

	lzma_free(coder, allocator);
	return;
}
1818
1819
1820static lzma_check
1821stream_decoder_mt_get_check(const void *coder_ptr)
1822{
1823 const struct lzma_stream_coder *coder = coder_ptr;
1824 return coder->stream_flags.check;
1825}
1826
1827
/// Report current memory usage and get/set the hard memory limit
/// (memlimit_stop). Called through lzma_next_coder.memconfig.
/// new_memlimit == 0 means "query only, don't change the limit".
static lzma_ret
stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
		uint64_t *old_memlimit, uint64_t new_memlimit)
{
	// NOTE: This function gets/sets memlimit_stop. For now,
	// memlimit_threading cannot be modified after initialization.
	//
	// *memusage will include cached memory too. Excluding cached memory
	// would be misleading and it wouldn't help the applications to
	// know how much memory is actually needed to decompress the file
	// because the higher the number of threads and the memlimits are
	// the more memory the decoder may use.
	//
	// Setting a new limit includes the cached memory too and too low
	// limits will be rejected. Alternative could be to free the cached
	// memory immediately if that helps to bring the limit down but
	// the current way is the simplest. It's unlikely that limit needs
	// to be lowered in the middle of a file anyway; the typical reason
	// to want a new limit is to increase after LZMA_MEMLIMIT_ERROR
	// and even such use isn't common.
	struct lzma_stream_coder *coder = coder_ptr;

	// The memory-usage counters are shared state; read them under
	// the coder mutex so the sum is consistent.
	mythread_sync(coder->mutex) {
		*memusage = coder->mem_direct_mode
				+ coder->mem_in_use
				+ coder->mem_cached
				+ coder->outq.mem_allocated;
	}

	// If no filter chains are allocated, *memusage may be zero.
	// Always return at least LZMA_MEMUSAGE_BASE.
	if (*memusage < LZMA_MEMUSAGE_BASE)
		*memusage = LZMA_MEMUSAGE_BASE;

	// Always report the old limit, even on a query-only call.
	*old_memlimit = coder->memlimit_stop;

	if (new_memlimit != 0) {
		// Reject limits below the current usage (cached memory
		// included, per the NOTE above).
		if (new_memlimit < *memusage)
			return LZMA_MEMLIMIT_ERROR;

		coder->memlimit_stop = new_memlimit;
	}

	return LZMA_OK;
}
1873
1874
/// Report total decompression progress (compressed bytes in, uncompressed
/// bytes out). Called through lzma_next_coder.get_progress.
///
/// The totals are the coder-level counters (progress moved there by
/// finished threads) plus the live counters of every initialized worker.
static void
stream_decoder_mt_get_progress(void *coder_ptr,
		uint64_t *progress_in, uint64_t *progress_out)
{
	struct lzma_stream_coder *coder = coder_ptr;

	// Lock coder->mutex to prevent finishing threads from moving their
	// progress info from the worker_thread structure to lzma_stream_coder.
	mythread_sync(coder->mutex) {
		*progress_in = coder->progress_in;
		*progress_out = coder->progress_out;

		// Each worker's counters are protected by that worker's own
		// mutex, taken here while the coder mutex is already held.
		for (size_t i = 0; i < coder->threads_initialized; ++i) {
			mythread_sync(coder->threads[i].mutex) {
				*progress_in += coder->threads[i].progress_in;
				*progress_out += coder->threads[i]
						.progress_out;
			}
		}
	}

	return;
}
1898
1899
/// Initialize (or reinitialize) the multithreaded Stream decoder.
///
/// On first use this allocates the coder and its synchronization
/// primitives; on reinitialization the existing coder is reused but all
/// threads, memory accounting, and per-Stream state are reset.
///
/// \return     LZMA_OK on success; LZMA_OPTIONS_ERROR for a bad thread
///             count or unsupported flags; LZMA_MEM_ERROR on allocation
///             or mutex/cond initialization failure.
static lzma_ret
stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_mt *options)
{
	struct lzma_stream_coder *coder;

	// Validate the options before touching anything else.
	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
		return LZMA_OPTIONS_ERROR;

	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);

	coder = next->coder;
	if (!coder) {
		// First-time initialization: allocate the coder and set up
		// the mutex and condition variable, unwinding carefully on
		// each failure so nothing leaks.
		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		// Hook up the virtual-function table.
		next->code = &stream_decode_mt;
		next->end = &stream_decoder_mt_end;
		next->get_check = &stream_decoder_mt_get_check;
		next->memconfig = &stream_decoder_mt_memconfig;
		next->get_progress = &stream_decoder_mt_get_progress;

		// Mark the dynamically managed members as empty so the
		// cleanup paths below are safe even before first use.
		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		memzero(&coder->outq, sizeof(coder->outq));

		coder->block_decoder = LZMA_NEXT_CODER_INIT;
		coder->mem_direct_mode = 0;

		coder->index_hash = NULL;
		coder->threads = NULL;
		coder->threads_free = NULL;
		coder->threads_initialized = 0;
	}

	// Cleanup old filter chain if one remains after unfinished decoding
	// of a previous Stream.
	lzma_filters_free(coder->filters, allocator);

	// By allocating threads from scratch we can start memory-usage
	// accounting from scratch, too. Changes in filter and block sizes may
	// affect number of threads.
	//
	// FIXME? Reusing should be easy but unlike the single-threaded
	// decoder, with some types of input file combinations reusing
	// could leave quite a lot of memory allocated but unused (first
	// file could allocate a lot, the next files could use fewer
	// threads and some of the allocations from the first file would not
	// get freed unless memlimit_threading forces us to clear caches).
	//
	// NOTE: The direct mode decoder isn't freed here if one exists.
	// It will be reused or freed as needed in the main loop.
	threads_end(coder, allocator);

	// All memusage counters start at 0 (including mem_direct_mode).
	// The little extra that is needed for the structs in this file
	// get accounted well enough by the filter chain memory usage
	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
	// stream_decoder_mt_memconfig() has to handle this specially so that
	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
	coder->mem_in_use = 0;
	coder->mem_cached = 0;
	coder->mem_next_block = 0;

	coder->progress_in = 0;
	coder->progress_out = 0;

	coder->sequence = SEQ_STREAM_HEADER;
	coder->thread_error = LZMA_OK;
	coder->pending_error = LZMA_OK;
	coder->thr = NULL;

	coder->timeout = options->timeout;

	// Zero limits mean "effectively unlimited" elsewhere in liblzma;
	// clamp to at least 1, and never let the threading limit exceed
	// the hard stop limit.
	coder->memlimit_threading = my_max(1, options->memlimit_threading);
	coder->memlimit_stop = my_max(1, options->memlimit_stop);
	if (coder->memlimit_threading > coder->memlimit_stop)
		coder->memlimit_threading = coder->memlimit_stop;

	// Cache the option flags as individual booleans.
	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
	coder->tell_unsupported_check
			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;

	coder->first_stream = true;
	coder->out_was_filled = false;
	coder->pos = 0;

	// The output queue is sized for one buffer per possible thread.
	coder->threads_max = options->threads;

	return_if_error(lzma_outq_init(&coder->outq, allocator,
			coder->threads_max));

	return stream_decoder_reset(coder, allocator);
}
2014
2015
/// \brief      Public entry point for the multithreaded .xz Stream decoder
///
/// Initializes *strm via stream_decoder_mt_init() and records which
/// actions lzma_code() may be called with.
extern LZMA_API(lzma_ret)
lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
{
	// NOTE(review): lzma_next_strm_init is a macro; it appears to
	// return early from this function on initialization failure.
	lzma_next_strm_init(stream_decoder_mt_init, strm, options);

	// Only LZMA_RUN and LZMA_FINISH are supported; the flush actions
	// are not enabled for this decoder.
	strm->internal->supported_actions[LZMA_RUN] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette