src/README.minGW view on Meta::CPAN
Basically, no version-dependent capabilities are used, so this
should also work with older versions of the tools mentioned above.
Similarly, the GnuWin32 packages (which I just prefer over MSYS)
could likely be replaced by a properly installed MSYS environment.
Furthermore, the following preconditions should be met:
1. The 'bin' folders under both the MinGW and the GnuWin32
installation directories should have been added to the PATH
environment variable. It is probably best to add these
directories permanently to PATH through the System
Properties dialog in the Control Panel.
2. The MinGW Make package only provides a 'mingw32-make.exe'
executable. There is no alias 'make.exe'. However, make is
preconfigured to use 'make' as the default $(MAKE) command;
this seems to be a bug in the MinGW GNU Make distribution.
Thus, a copy of 'mingw32-make.exe' named 'make.exe' should
be placed in MinGW's 'bin' directory.
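For example, assuming MinGW is installed under C:\MinGW (adjust the path
to your own installation), the following command satisfies this:
    copy C:\MinGW\bin\mingw32-make.exe C:\MinGW\bin\make.exe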
src/Source/FreeImage.h view on Meta::CPAN
#define GIF_LOAD256 1 //! load the image as a 256 color image with unused palette entries, if it's 16 or 2 color
#define GIF_PLAYBACK 2 //! 'Play' the GIF to generate each frame (as 32bpp) instead of returning raw frame data when loading
#define HDR_DEFAULT 0
#define ICO_DEFAULT 0
#define ICO_MAKEALPHA 1 //! convert to 32bpp and create an alpha channel from the AND-mask when loading
#define IFF_DEFAULT 0
#define J2K_DEFAULT 0 //! save with a 16:1 rate
#define JP2_DEFAULT 0 //! save with a 16:1 rate
#define JPEG_DEFAULT 0 //! loading (see JPEG_FAST); saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
#define JPEG_FAST 0x0001 //! load the file as fast as possible, sacrificing some quality
#define JPEG_ACCURATE 0x0002 //! load the file with the best quality, sacrificing some speed
#define JPEG_CMYK 0x0004 //! load separated CMYK "as is" (use | to combine with other load flags)
#define JPEG_EXIFROTATE 0x0008 //! load and rotate according to Exif 'Orientation' tag if available
#define JPEG_GREYSCALE 0x0010 //! load and convert to a 8-bit greyscale image
#define JPEG_QUALITYSUPERB 0x80 //! save with superb quality (100:1)
#define JPEG_QUALITYGOOD 0x0100 //! save with good quality (75:1)
#define JPEG_QUALITYNORMAL 0x0200 //! save with normal quality (50:1)
#define JPEG_QUALITYAVERAGE 0x0400 //! save with average quality (25:1)
#define JPEG_QUALITYBAD 0x0800 //! save with bad quality (10:1)
#define JPEG_PROGRESSIVE 0x2000 //! save as a progressive-JPEG (use | to combine with other save flags)
#define JPEG_SUBSAMPLING_411 0x1000 //! save with high 4x1 chroma subsampling (4:1:1)
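/* A minimal usage sketch (not part of this header): load and save flags are
 * combined with '|' and passed through the public FreeImage API, e.g.
 *
 *   FIBITMAP *dib = FreeImage_Load(FIF_JPEG, "photo.jpg",
 *                                  JPEG_EXIFROTATE | JPEG_ACCURATE);
 *   if (dib) {
 *       FreeImage_Save(FIF_JPEG, dib, "out.jpg",
 *                      JPEG_QUALITYGOOD | JPEG_PROGRESSIVE);
 *       FreeImage_Unload(dib);
 *   }
 */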
src/Source/FreeImage/NNQuantizer.cpp view on Meta::CPAN
netindex[previouscol] = (startpos+maxnetpos)>>1;
for (j = previouscol+1; j < 256; j++)
netindex[j] = maxnetpos; // really 256
}
///////////////////////////////////////////////////////////////////////////////
// Search for BGR values 0..255 (after net is unbiased) and return colour index
// ----------------------------------------------------------------------------
int NNQuantizer::inxsearch(int b, int g, int r) {
int i, j, dist, a, bestd;
int *p;
int best;
bestd = 1000; // biggest possible dist is 256*3
best = -1;
i = netindex[g]; // index on g
j = i-1; // start at netindex[g] and work outwards
while ((i < netsize) || (j >= 0)) {
if (i < netsize) {
p = network[i];
dist = p[FI_RGBA_GREEN] - g; // inx key
if (dist >= bestd)
i = netsize; // stop iter
else {
i++;
if (dist < 0)
dist = -dist;
a = p[FI_RGBA_BLUE] - b;
if (a < 0)
a = -a;
dist += a;
if (dist < bestd) {
a = p[FI_RGBA_RED] - r;
if (a<0)
a = -a;
dist += a;
if (dist < bestd) {
bestd = dist;
best = p[3];
}
}
}
}
if (j >= 0) {
p = network[j];
dist = g - p[FI_RGBA_GREEN]; // inx key - reverse dif
if (dist >= bestd)
j = -1; // stop iter
else {
j--;
if (dist < 0)
dist = -dist;
a = p[FI_RGBA_BLUE] - b;
if (a<0)
a = -a;
dist += a;
if (dist < bestd) {
a = p[FI_RGBA_RED] - r;
if (a<0)
a = -a;
dist += a;
if (dist < bestd) {
bestd = dist;
best = p[3];
}
}
}
}
}
return best;
}
///////////////////////////////
// Search for biased BGR values
// ----------------------------
int NNQuantizer::contest(int b, int g, int r) {
// finds closest neuron (min dist) and updates freq
// finds best neuron (min dist-bias) and returns position
// for frequently chosen neurons, freq[i] is high and bias[i] is negative
// bias[i] = gamma*((1/netsize)-freq[i])
int i,dist,a,biasdist,betafreq;
int bestpos,bestbiaspos,bestd,bestbiasd;
int *p,*f, *n;
bestd = ~(((int) 1)<<31);
bestbiasd = bestd;
bestpos = -1;
bestbiaspos = bestpos;
p = bias;
f = freq;
for (i = 0; i < netsize; i++) {
n = network[i];
dist = n[FI_RGBA_BLUE] - b;
if (dist < 0)
dist = -dist;
a = n[FI_RGBA_GREEN] - g;
if (a < 0)
a = -a;
dist += a;
a = n[FI_RGBA_RED] - r;
if (a < 0)
a = -a;
dist += a;
if (dist < bestd) {
bestd = dist;
bestpos = i;
}
biasdist = dist - ((*p)>>(intbiasshift-netbiasshift));
if (biasdist < bestbiasd) {
bestbiasd = biasdist;
bestbiaspos = i;
}
betafreq = (*f >> betashift);
*f++ -= betafreq;
*p++ += (betafreq << gammashift);
}
freq[bestpos] += beta;
bias[bestpos] -= betagamma;
return bestbiaspos;
}
///////////////////////////////////////////////////////
// Move neuron i towards biased (b,g,r) by factor alpha
// ----------------------------------------------------
void NNQuantizer::altersingle(int alpha, int i, int b, int g, int r) {
int *n;
n = network[i]; // alter hit neuron
src/Source/FreeImage/PluginPCX.cpp view on Meta::CPAN
for example because it is being downloaded while loaded, the plugin
might also not gracefully fail.
The Load function has the following parameters:
The first parameter (FreeImageIO *io) is a structure providing
function pointers in order to make use of FreeImage's IO redirection. By using
FreeImage's file I/O functions instead of the standard ones, it is guaranteed
that all bitmap types, both current and future ones, can be loaded from
memory, file cabinets, the internet and more. The second parameter (fi_handle handle)
is a companion of FreeImageIO and is best compared to the standard FILE* type,
in a generalized form.
The third parameter (int page) indicates whether we will be loading a certain page
in the bitmap or if we will load the default one. This parameter is only used if
the plugin supports multi-paged bitmaps, e.g. cabinet bitmaps that contain a series
of images or pages. If the plugin does support multi-paging, the page parameter
can contain either a number greater than or equal to 0 to load a certain page, or -1 to
load the default page. If the plugin does not support multi-paging,
the page parameter is always -1.
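For reference, a minimal sketch of the plugin entry point these parameters
belong to (the standard FreeImage plugin Load signature; the body shown here
is only a placeholder):
static FIBITMAP * DLL_CALLCONV
Load(FreeImageIO *io, fi_handle handle, int page, int flags, void *data) {
	// read the bitmap through io->read_proc(..., handle), honouring 'page' and 'flags'
	return NULL;
}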
src/Source/FreeImage/PluginRAW.cpp view on Meta::CPAN
static const char * DLL_CALLCONV
Description() {
return "RAW camera image";
}
static const char * DLL_CALLCONV
Extension() {
/**
Below are known RAW file extensions that you can check using FreeImage_GetFIFFromFormat.
If a file extension is not listed, that doesn't mean that you cannot load it.
Using FreeImage_GetFileType is the best way to know if a RAW file format is supported.
*/
static const char *raw_extensions =
"3fr," // Hasselblad Digital Camera Raw Image Format.
"arw," // Sony Digital Camera Raw Image Format for Alpha devices.
"bay," // Casio Digital Camera Raw File Format.
"bmq," // NuCore Raw Image File.
"cap," // Phase One Digital Camera Raw Image Format.
"cine," // Phantom Software Raw Image File.
"cr2," // Canon Digital Camera RAW Image Format version 2.0. These images are based on the TIFF image standard.
"crw," // Canon Digital Camera RAW Image Format version 1.0.
src/Source/LibJPEG/README view on Meta::CPAN
CompuServe Incorporated. GIF(sm) is a Service Mark property of
CompuServe Incorporated."
REFERENCES
==========
We recommend reading one or more of these references before trying to
understand the innards of the JPEG software.
The best short technical introduction to the JPEG compression algorithm is
Wallace, Gregory K. "The JPEG Still Picture Compression Standard",
Communications of the ACM, April 1991 (vol. 34 no. 4), pp. 30-44.
(Adjacent articles in that issue discuss MPEG motion picture compression,
applications of JPEG, and related topics.) If you don't have the CACM issue
handy, a PostScript file containing a revised version of Wallace's article is
available at http://www.ijg.org/files/wallace.ps.gz. The file (actually
a preprint for an article that appeared in IEEE Trans. Consumer Electronics)
omits the sample images that appeared in CACM, but it includes corrections
and some added material. Note: the Wallace article is copyright ACM and IEEE,
and it may not be used for commercial purposes.
A somewhat less technical, more leisurely introduction to JPEG can be found in
"The Data Compression Book" by Mark Nelson and Jean-loup Gailly, published by
M&T Books (New York), 2nd ed. 1996, ISBN 1-55851-434-1. This book provides
good explanations and example C code for a multitude of compression methods
including JPEG. It is an excellent source if you are comfortable reading C
code but don't know much about data compression in general. The book's JPEG
sample code is far from industrial-strength, but when you are ready to look
at a full implementation, you've got one here...
The best currently available description of JPEG is the textbook "JPEG Still
Image Data Compression Standard" by William B. Pennebaker and Joan L.
Mitchell, published by Van Nostrand Reinhold, 1993, ISBN 0-442-01272-1.
Price US$59.95, 638 pp. The book includes the complete text of the ISO JPEG
standards (DIS 10918-1 and draft DIS 10918-2).
Although this is by far the most detailed and comprehensive exposition of
JPEG publicly available, we point out that it is still missing an explanation
of the most essential properties and algorithms of the underlying DCT
technology.
If you think that you know about DCT-based JPEG after reading this book,
then you are deluding yourself. The real fundamentals and corresponding potential
src/Source/LibJPEG/README view on Meta::CPAN
send usenet/news.answers/jpeg-faq/part2
ACKNOWLEDGMENTS
===============
Thanks to Juergen Bruder for providing me with a copy of the common DCT
algorithm article, only to find out that I had come to the same result
in a more direct and comprehensible way with a more generative approach.
Thanks to Istvan Sebestyen and Joan L. Mitchell for inviting me to the
ITU JPEG (Study Group 16) meeting in Geneva, Switzerland.
Thanks to Thomas Wiegand and Gary Sullivan for inviting me to the
Joint Video Team (MPEG & ITU) meeting in Geneva, Switzerland.
Thanks to Thomas Richter and Daniel Lee for inviting me to the
ISO/IEC JTC1/SC29/WG1 (previously known as JPEG, together with ITU-T SG16)
meeting in Berlin, Germany.
Thanks to John Korejwa and Massimo Ballerini for inviting me to
src/Source/LibJPEG/install.txt view on Meta::CPAN
CPUs, a 16x16=>32 bit multiply instruction is faster than a full 32x32=>32
bit multiply. Unfortunately there is no portable way to specify such a
multiplication in C, but some compilers can generate one when you use the
right combination of casts. See the MULTIPLYxxx macro definitions in
jdct.h. If your compiler makes "int" be 32 bits and "short" be 16 bits,
defining SHORTxSHORT_32 is fairly likely to work. When experimenting with
alternate definitions, be sure to test not only whether the code still works
(use the self-test), but also whether it is actually faster --- on some
compilers, alternate definitions may compute the right answer, yet be slower
than the default. Timing cjpeg on a large PGM (grayscale) input file is the
best way to check this, as the DCT will be the largest fraction of the runtime
in that mode. (Note: some of the distributed compiler-specific jconfig files
already contain #define switches to select appropriate MULTIPLYxxx
definitions.)
If your machine has sufficiently fast floating point hardware, you may find
that the float DCT method is faster than the integer DCT methods, even
after tweaking the integer multiply macros. In that case you may want to
make the float DCT be the default method. (The only objection to this is
that float DCT results may vary slightly across machines.) To do that, add
"#define JDCT_DEFAULT JDCT_FLOAT" to jconfig.h. Even if you don't change
src/Source/LibJPEG/jchuff.c view on Meta::CPAN
entropy->dc_count_ptrs[compptr->dc_tbl_no],
entropy->ac_count_ptrs[compptr->ac_tbl_no]);
entropy->saved.last_dc_val[ci] = MCU_data[blkn][0][0];
}
return TRUE;
}
/*
* Generate the best Huffman code table for the given counts, fill htbl.
*
* The JPEG standard requires that no symbol be assigned a codeword of all
* one bits (so that padding bits added at the end of a compressed segment
* can't look like a valid code). Because of the canonical ordering of
* codewords, this just means that there must be an unused slot in the
* longest codeword length category. Section K.2 of the JPEG spec suggests
* reserving such a slot by pretending that symbol 256 is a valid symbol
* with count 1. In theory that's not optimal; giving it count zero but
* including it in the symbol set anyway should give a better Huffman code.
* But the theoretically better code actually seems to come out worse in
* practice, because it produces more all-ones bytes (which incur stuffed
* zero bytes in the final file). In any case the difference is tiny.
*
* The JPEG standard requires Huffman codes to be no more than 16 bits long.
* If some symbols have a very small but nonzero probability, the Huffman tree
* must be adjusted to meet the code length restriction. We currently use
* the adjustment method suggested in JPEG section K.2. This method is *not*
* optimal; it may not choose the best possible limited-length code. But
* typically only very-low-frequency symbols will be given less-than-optimal
* lengths, so the code is almost optimal. Experimental comparisons against
* an optimal limited-length-code algorithm indicate that the difference is
* microscopic --- usually less than a hundredth of a percent of total size.
* So the extra complexity of an optimal algorithm doesn't seem worthwhile.
*/
LOCAL(void)
jpeg_gen_optimal_table (j_compress_ptr cinfo, JHUFF_TBL * htbl, long freq[])
{
src/Source/LibJPEG/jdatasrc.c view on Meta::CPAN
*
* In typical applications, this should read fresh data into the buffer
* (ignoring the current state of next_input_byte & bytes_in_buffer),
* reset the pointer & count to the start of the buffer, and return TRUE
* indicating that the buffer has been reloaded. It is not necessary to
* fill the buffer entirely, only to obtain at least one more byte.
*
* There is no such thing as an EOF return. If the end of the file has been
* reached, the routine has a choice of ERREXIT() or inserting fake data into
* the buffer. In most cases, generating a warning message and inserting a
* fake EOI marker is the best course of action --- this will allow the
* decompressor to output however much of the image is there. However,
* the resulting error message is misleading if the real problem is an empty
* input file, so we handle that case specially.
*
* In applications that need to be able to suspend compression due to input
* not being available yet, a FALSE return indicates that no more data can be
* obtained right now, but more may be forthcoming later. In this situation,
* the decompressor will return to its caller (with an indication of the
* number of scanlines it has read, if any). The application should resume
* decompression after it has loaded more data into the input buffer. Note
src/Source/LibJPEG/jdct.h view on Meta::CPAN
* pointed to by compptr->dct_table. The output data is to be placed into the
* sample array starting at a specified column. (Any row offset needed will
* be applied to the array pointer before it is passed to the IDCT code.)
* Note that the number of samples emitted by the IDCT routine is
* DCT_h_scaled_size * DCT_v_scaled_size.
*/
/* typedef inverse_DCT_method_ptr is declared in jpegint.h */
/*
* Each IDCT routine has its own ideas about the best dct_table element type.
*/
typedef MULTIPLIER ISLOW_MULT_TYPE; /* short or int, whichever is faster */
#if BITS_IN_JSAMPLE == 8
typedef MULTIPLIER IFAST_MULT_TYPE; /* 16 bits is OK, use short if faster */
#define IFAST_SCALE_BITS 2 /* fractional bits in scale factors */
#else
typedef INT32 IFAST_MULT_TYPE; /* need 32 bits for scaled quantizers */
#define IFAST_SCALE_BITS 13 /* fractional bits in scale factors */
#endif
src/Source/LibJPEG/jmemsys.h view on Meta::CPAN
* For conditions of distribution and use, see the accompanying README file.
*
* This include file defines the interface between the system-independent
* and system-dependent portions of the JPEG memory manager. No other
* modules need include it. (The system-independent portion is jmemmgr.c;
* there are several different versions of the system-dependent portion.)
*
* This file works as-is for the system-dependent memory managers supplied
* in the IJG distribution. You may need to modify it if you write a
* custom memory manager. If system-dependent changes are needed in
* this file, the best method is to #ifdef them based on a configuration
* symbol supplied in jconfig.h, as we have done with USE_MSDOS_MEMMGR
* and USE_MAC_MEMMGR.
*/
/* Short forms of external names for systems with brain-damaged linkers. */
#ifdef NEED_SHORT_EXTERNAL_NAMES
#define jpeg_get_small jGetSmall
#define jpeg_free_small jFreeSmall
src/Source/LibJPEG/jquant2.c view on Meta::CPAN
*
* Our method of efficiently finding nearest colors is based on the "locally
* sorted search" idea described by Heckbert and on the incremental distance
* calculation described by Spencer W. Thomas in chapter III.1 of Graphics
* Gems II (James Arvo, ed. Academic Press, 1991). Thomas points out that
* the distances from a given colormap entry to each cell of the histogram can
* be computed quickly using an incremental method: the differences between
* distances to adjacent cells themselves differ by a constant. This allows a
* fairly fast implementation of the "brute force" approach of computing the
* distance from every colormap entry to every histogram cell. Unfortunately,
* it needs a work array to hold the best-distance-so-far for each histogram
* cell (because the inner loop has to be over cells, not colormap entries).
* The work array elements have to be INT32s, so the work array would need
* 256Kb at our recommended precision. This is not feasible in DOS machines.
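*
* (A worked note on the incremental calculation, not from the original text:
* with cell spacing s along one axis, cell centers x_k = x0 + k*s, and
* colormap entry coordinate c, the squared distance d_k = (x_k - c)^2
* satisfies
*     d_{k+1} - d_k = 2*s*(x0 - c) + (2*k + 1)*s^2,
* so successive differences grow by the constant 2*s^2; this is what lets the
* inner loop of find_best_colors update the distance with additions only.)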
*
* To get around these problems, we apply Thomas' method to compute the
* nearest colors for only the cells within a small subbox of the histogram.
* The work array need be only as big as the subbox, so the memory usage
* problem is solved. Furthermore, we need not fill subboxes that are never
* referenced in pass2; many images use only part of the color gamut, so a
* fair amount of work is saved. An additional advantage of this
* approach is that we can apply Heckbert's locality criterion to quickly
* eliminate colormap entries that are far away from the subbox; typically
* three-fourths of the colormap entries are rejected by Heckbert's criterion,
* and we need not compute their distances to individual cells in the subbox.
* The speed of this approach is heavily influenced by the subbox size: too
* small means too much overhead, too big loses because Heckbert's criterion
* can't eliminate as many colormap entries. Empirically the best subbox
* size seems to be about 1/512th of the histogram (1/8th in each direction).
*
* Thomas' article also describes a refined method which is asymptotically
* faster than the brute-force method, but it is also far more complex and
* cannot efficiently be applied to small subboxes. It is therefore not
* useful for programs intended to be portable to DOS machines. On machines
* with plenty of memory, filling the whole histogram in one shot with Thomas'
* refined method might be faster than the present code --- but then again,
* it might not be any faster, and it's certainly more complicated.
*/
src/Source/LibJPEG/jquant2.c view on Meta::CPAN
#define BOX_C2_ELEMS (1<<BOX_C2_LOG)
#define BOX_C0_SHIFT (C0_SHIFT + BOX_C0_LOG)
#define BOX_C1_SHIFT (C1_SHIFT + BOX_C1_LOG)
#define BOX_C2_SHIFT (C2_SHIFT + BOX_C2_LOG)
/*
* The next three routines implement inverse colormap filling. They could
* all be folded into one big routine, but splitting them up this way saves
* some stack space (the mindist[] and bestdist[] arrays need not coexist)
* and may allow some compilers to produce better code by registerizing more
* inner-loop variables.
*/
LOCAL(int)
find_nearby_colors (j_decompress_ptr cinfo, int minc0, int minc1, int minc2,
JSAMPLE colorlist[])
/* Locate the colormap entries close enough to an update box to be candidates
* for the nearest entry to some cell(s) in the update box. The update box
* is specified by the center coordinates of its first cell. The number of
src/Source/LibJPEG/jquant2.c view on Meta::CPAN
ncolors = 0;
for (i = 0; i < numcolors; i++) {
if (mindist[i] <= minmaxdist)
colorlist[ncolors++] = (JSAMPLE) i;
}
return ncolors;
}
LOCAL(void)
find_best_colors (j_decompress_ptr cinfo, int minc0, int minc1, int minc2,
int numcolors, JSAMPLE colorlist[], JSAMPLE bestcolor[])
/* Find the closest colormap entry for each cell in the update box,
* given the list of candidate colors prepared by find_nearby_colors.
* Return the indexes of the closest entries in the bestcolor[] array.
* This routine uses Thomas' incremental distance calculation method to
* find the distance from a colormap entry to successive cells in the box.
*/
{
int ic0, ic1, ic2;
int i, icolor;
register INT32 * bptr; /* pointer into bestdist[] array */
JSAMPLE * cptr; /* pointer into bestcolor[] array */
INT32 dist0, dist1; /* initial distance values */
register INT32 dist2; /* current distance in inner loop */
INT32 xx0, xx1; /* distance increments */
register INT32 xx2;
INT32 inc0, inc1, inc2; /* initial values for increments */
/* This array holds the distance to the nearest-so-far color for each cell */
INT32 bestdist[BOX_C0_ELEMS * BOX_C1_ELEMS * BOX_C2_ELEMS];
/* Initialize best-distance for each cell of the update box */
bptr = bestdist;
for (i = BOX_C0_ELEMS*BOX_C1_ELEMS*BOX_C2_ELEMS-1; i >= 0; i--)
*bptr++ = 0x7FFFFFFFL;
/* For each color selected by find_nearby_colors,
* compute its distance to the center of each cell in the box.
* If that's less than best-so-far, update best distance and color number.
*/
/* Nominal steps between cell centers ("x" in Thomas article) */
#define STEP_C0 ((1 << C0_SHIFT) * C0_SCALE)
#define STEP_C1 ((1 << C1_SHIFT) * C1_SCALE)
#define STEP_C2 ((1 << C2_SHIFT) * C2_SCALE)
for (i = 0; i < numcolors; i++) {
icolor = GETJSAMPLE(colorlist[i]);
/* Compute (square of) distance from minc0/c1/c2 to this color */
src/Source/LibJPEG/jquant2.c view on Meta::CPAN
dist0 = inc0*inc0;
inc1 = (minc1 - GETJSAMPLE(cinfo->colormap[1][icolor])) * C1_SCALE;
dist0 += inc1*inc1;
inc2 = (minc2 - GETJSAMPLE(cinfo->colormap[2][icolor])) * C2_SCALE;
dist0 += inc2*inc2;
/* Form the initial difference increments */
inc0 = inc0 * (2 * STEP_C0) + STEP_C0 * STEP_C0;
inc1 = inc1 * (2 * STEP_C1) + STEP_C1 * STEP_C1;
inc2 = inc2 * (2 * STEP_C2) + STEP_C2 * STEP_C2;
/* Now loop over all cells in box, updating distance per Thomas method */
bptr = bestdist;
cptr = bestcolor;
xx0 = inc0;
for (ic0 = BOX_C0_ELEMS-1; ic0 >= 0; ic0--) {
dist1 = dist0;
xx1 = inc1;
for (ic1 = BOX_C1_ELEMS-1; ic1 >= 0; ic1--) {
dist2 = dist1;
xx2 = inc2;
for (ic2 = BOX_C2_ELEMS-1; ic2 >= 0; ic2--) {
if (dist2 < *bptr) {
*bptr = dist2;
src/Source/LibJPEG/jquant2.c view on Meta::CPAN
LOCAL(void)
fill_inverse_cmap (j_decompress_ptr cinfo, int c0, int c1, int c2)
/* Fill the inverse-colormap entries in the update box that contains */
/* histogram cell c0/c1/c2. (Only that one cell MUST be filled, but */
/* we can fill as many others as we wish.) */
{
my_cquantize_ptr cquantize = (my_cquantize_ptr) cinfo->cquantize;
hist3d histogram = cquantize->histogram;
int minc0, minc1, minc2; /* lower left corner of update box */
int ic0, ic1, ic2;
register JSAMPLE * cptr; /* pointer into bestcolor[] array */
register histptr cachep; /* pointer into main cache array */
/* This array lists the candidate colormap indexes. */
JSAMPLE colorlist[MAXNUMCOLORS];
int numcolors; /* number of candidate colors */
/* This array holds the actually closest colormap index for each cell. */
JSAMPLE bestcolor[BOX_C0_ELEMS * BOX_C1_ELEMS * BOX_C2_ELEMS];
/* Convert cell coordinates to update box ID */
c0 >>= BOX_C0_LOG;
c1 >>= BOX_C1_LOG;
c2 >>= BOX_C2_LOG;
/* Compute true coordinates of update box's origin corner.
* Actually we compute the coordinates of the center of the corner
* histogram cell, which are the lower bounds of the volume we care about.
*/
minc0 = (c0 << BOX_C0_SHIFT) + ((1 << C0_SHIFT) >> 1);
minc1 = (c1 << BOX_C1_SHIFT) + ((1 << C1_SHIFT) >> 1);
minc2 = (c2 << BOX_C2_SHIFT) + ((1 << C2_SHIFT) >> 1);
/* Determine which colormap entries are close enough to be candidates
* for the nearest entry to some cell in the update box.
*/
numcolors = find_nearby_colors(cinfo, minc0, minc1, minc2, colorlist);
/* Determine the actually nearest colors. */
find_best_colors(cinfo, minc0, minc1, minc2, numcolors, colorlist,
bestcolor);
/* Save the best color numbers (plus 1) in the main cache array */
c0 <<= BOX_C0_LOG; /* convert ID back to base cell indexes */
c1 <<= BOX_C1_LOG;
c2 <<= BOX_C2_LOG;
cptr = bestcolor;
for (ic0 = 0; ic0 < BOX_C0_ELEMS; ic0++) {
for (ic1 = 0; ic1 < BOX_C1_ELEMS; ic1++) {
cachep = & histogram[c0+ic0][c1+ic1][c2];
for (ic2 = 0; ic2 < BOX_C2_ELEMS; ic2++) {
*cachep++ = (histcell) (GETJSAMPLE(*cptr++) + 1);
}
}
}
}
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
if you process just one scanline at a time, you must make a one-element
pointer array to conform to this structure. Pointers to JSAMPLE rows are of
type JSAMPROW, and the pointer to the pointer array is of type JSAMPARRAY.
The library accepts or supplies one or more complete scanlines per call.
It is not possible to process part of a row at a time. Scanlines are always
processed top-to-bottom. You can process an entire image in one call if you
have it all in memory, but usually it's simplest to process one scanline at
a time.
For best results, source data values should have the precision specified by
BITS_IN_JSAMPLE (normally 8 bits). For instance, if you choose to compress
data that's only 6 bits/channel, you should left-justify each value in a
byte before passing it to the compressor. If you need to compress data
that has more than 8 bits/channel, compile with BITS_IN_JSAMPLE = 9 to 12.
(See "Library compile-time options", later.)
The data format returned by the decompressor is the same in all details,
except that colormapped output is supported. (Again, a JPEG file is never
colormapped. But you can ask the decompressor to perform on-the-fly color
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
ADVANCED FEATURES
=================
Compression parameter selection
-------------------------------
This section describes all the optional parameters you can set for JPEG
compression, as well as the "helper" routines provided to assist in this
task. Proper setting of some parameters requires detailed understanding
of the JPEG standard; if you don't know what a parameter is for, it's best
not to mess with it! See REFERENCES in the README file for pointers to
more info about JPEG.
It's a good idea to call jpeg_set_defaults() first, even if you plan to set
all the parameters; that way your code is more likely to work with future JPEG
libraries that have additional parameters. For the same reason, we recommend
you use a helper routine where one is provided, in preference to twiddling
cinfo fields directly.
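For example (a minimal sketch; 85 is an arbitrary quality choice and the
surrounding setup of cinfo is omitted):
    struct jpeg_compress_struct cinfo;
    /* ... create the compression object, set image width/height/colorspace ... */
    jpeg_set_defaults(&cinfo);
    jpeg_set_quality(&cinfo, 85, TRUE);  /* TRUE forces baseline-compatible tables */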
The helper routines are:
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
procedure.
term_source (j_decompress_ptr cinfo)
Terminate source --- called by jpeg_finish_decompress() after all
data has been read. Often a no-op.
For both fill_input_buffer() and skip_input_data(), there is no such thing
as an EOF return. If the end of the file has been reached, the routine has
a choice of exiting via ERREXIT() or inserting fake data into the buffer.
In most cases, generating a warning message and inserting a fake EOI marker
is the best course of action --- this will allow the decompressor to output
however much of the image is there. In pathological cases, the decompressor
may swallow the EOI and again demand data ... just keep feeding it fake EOIs.
jdatasrc.c illustrates the recommended error recovery behavior.
term_source() is NOT called by jpeg_abort() or jpeg_destroy(). If you want
the source manager to be cleaned up during an abort, you must do it yourself.
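As an illustration of the fake-EOI technique, here is a minimal sketch of a
custom fill_input_buffer(); my_source_mgr, INPUT_BUF_SIZE and read_more_data()
are assumed application-side names, with my_source_mgr beginning with a
struct jpeg_source_mgr member named 'pub' and owning 'buffer' (jdatasrc.c is
the authoritative version):
    METHODDEF(boolean)
    fill_input_buffer (j_decompress_ptr cinfo)
    {
      my_source_mgr *src = (my_source_mgr *) cinfo->src;
      size_t nbytes = read_more_data(src->buffer, INPUT_BUF_SIZE);
      if (nbytes == 0) {                /* end of input reached */
        WARNMS(cinfo, JWRN_JPEG_EOF);   /* warn, then fake an EOI marker */
        src->buffer[0] = (JOCTET) 0xFF;
        src->buffer[1] = (JOCTET) JPEG_EOI;
        nbytes = 2;
      }
      src->pub.next_input_byte = src->buffer;
      src->pub.bytes_in_buffer = nbytes;
      return TRUE;
    }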
You will also need code to create a jpeg_source_mgr struct, fill in its method
pointers, and insert a pointer to the struct into the "src" field of the JPEG
decompression object. This can be done in-line in your setup code if you
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
After reading the final scan and reaching the end of the input file, the
buffered image remains available; it can be read additional times by
repeating the jpeg_start_output()/jpeg_read_scanlines()/jpeg_finish_output()
sequence. For example, a useful technique is to use fast one-pass color
quantization for display passes made while the image is arriving, followed by
a final display pass using two-pass quantization for highest quality. This
is done by changing the library parameters before the final output pass.
Changing parameters between passes is discussed in detail below.
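A minimal sketch of that sequence (buffer is an assumed JSAMPARRAY, the
buffered_image flag is set after jpeg_read_header(), and all error handling
is omitted):
    cinfo.buffered_image = TRUE;        /* select buffered-image mode */
    jpeg_start_decompress(&cinfo);
    while (! jpeg_input_complete(&cinfo)) {
      jpeg_start_output(&cinfo, cinfo.input_scan_number);
      while (cinfo.output_scanline < cinfo.output_height)
        jpeg_read_scanlines(&cinfo, buffer, 1);
      jpeg_finish_output(&cinfo);
    }
    jpeg_finish_decompress(&cinfo);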
In general the last scan of a progressive file cannot be recognized as such
until after it is read, so a post-input display pass is the best approach if
you want special processing in the final pass.
When done with the image, be sure to call jpeg_finish_decompress() to release
the buffered image (or just use jpeg_destroy_decompress()).
If input data arrives faster than it can be displayed, the application can
cause the library to decode input data in advance of what's needed to produce
output. This is done by calling the routine jpeg_consume_input().
The return value is one of the following:
JPEG_REACHED_SOS: reached an SOS marker (the start of a new scan)
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
whenever the output processing overtakes the input; thus, simple lockstep
display requires no direct calls to jpeg_consume_input(). But by adding
calls to jpeg_consume_input(), you can absorb data in advance of what is
being displayed. This has two benefits:
* You can limit buildup of unprocessed data in your input buffer.
* You can eliminate extra display passes by paying attention to the
state of the library's input processing.
The first of these benefits only requires interspersing calls to
jpeg_consume_input() with your display operations and any other processing
you may be doing. To avoid wasting cycles due to backtracking, it's best to
call jpeg_consume_input() only after a hundred or so new bytes have arrived.
This is discussed further under "I/O suspension", above. (Note: the JPEG
library currently is not thread-safe. You must not call jpeg_consume_input()
from one thread of control if a different library routine is working on the
same JPEG object in another thread.)
When input arrives fast enough that more than one new scan is available
before you start a new output pass, you may as well skip the output pass
corresponding to the completed scan. This occurs for free if you pass
cinfo.input_scan_number as the target scan number to jpeg_start_output().
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
In this situation you may wish to abort the current display pass and start a
new one using the newly arrived information. To do so, just call
jpeg_finish_output() and then start a new pass with jpeg_start_output().
A variant strategy is to abort and restart display if more than one complete
scan arrives during an output pass; this can be detected by noting
JPEG_REACHED_SOS returns and/or examining cinfo.input_scan_number. This
idea should be employed with caution, however, since the display process
might never get to the bottom of the image before being aborted, resulting
in the lower part of the screen being several passes worse than the upper.
In most cases it's probably best to abort an output pass only if the whole
file has arrived and you want to begin the final output pass immediately.
When receiving data across a communication link, we recommend always using
the current input scan number for the output target scan number; if a
higher-quality final pass is to be done, it should be started (aborting any
incomplete output pass) as soon as the end of file is received. However,
many other strategies are possible. For example, the application can examine
the parameters of the current input scan and decide whether to display it or
not. If the scan contains only chroma data, one might choose not to use it
as the target scan, expecting that the scan will be small and will arrive
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
single-scan images. Requesting buffered-image mode for such an image wastes
memory. Worse, it can cost time on large images, since the buffered data has
to be swapped out or written to a temporary file. If you are concerned about
maximum performance on baseline JPEG files, you should use buffered-image
mode only when the incoming file actually has multiple scans. This can be
tested by calling jpeg_has_multiple_scans(), which will return a correct
result at any time after jpeg_read_header() completes.
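For example (a minimal sketch):
    jpeg_read_header(&cinfo, TRUE);
    if (jpeg_has_multiple_scans(&cinfo))
      cinfo.buffered_image = TRUE;      /* only pay the buffering cost when it helps */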
It is also worth noting that when you use jpeg_consume_input() to let input
processing get ahead of output processing, the resulting pattern of access to
the coefficient buffer is quite nonsequential. It's best to use the memory
manager jmemnobs.c if you can (ie, if you have enough real or virtual main
memory). If not, at least make sure that max_memory_to_use is set as high as
possible. If the JPEG memory manager has to use a temporary file, you will
probably see a lot of disk traffic and poor performance. (This could be
improved with additional work on the memory manager, but we haven't gotten
around to it yet.)
In some applications it may be convenient to use jpeg_consume_input() for all
input processing, including reading the initial markers; that is, you may
wish to call jpeg_consume_input() instead of jpeg_read_header() during
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
If you wish to store user-supplied text, we recommend you use COM markers
and place readable 7-bit ASCII text in them. Newline conventions are not
standardized --- expect to find LF (Unix style), CR/LF (DOS style), or CR
(Mac style). A robust COM reader should be able to cope with random binary
garbage, including nulls, since some applications generate COM markers
containing non-ASCII junk. (But yours should not be one of them.)
For program-supplied data, use an APPn marker, and be sure to begin it with an
identifying string so that you can tell whether the marker is actually yours.
It's probably best to avoid using APP0 or APP14 for any private markers.
(NOTE: the upcoming SPIFF standard will use APP8 markers; we recommend you
not use APP8 markers for any private purposes, either.)
Keep in mind that at most 65533 bytes can be put into one marker, but you
can have as many markers as you like.
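For example, a minimal sketch of writing a COM marker (called after
jpeg_start_compress() and before the first jpeg_write_scanlines() call; the
text itself is arbitrary):
    static const char comment[] = "example comment text";
    jpeg_write_marker(&cinfo, JPEG_COM,
                      (const JOCTET *) comment, (unsigned int) strlen(comment));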
By default, the IJG compression library will write a JFIF APP0 marker if the
selected JPEG colorspace is grayscale or YCbCr, or an Adobe APP14 marker if
the selected colorspace is RGB, CMYK, or YCCK. You can disable this, but
we don't recommend it. The decompression library will recognize JFIF and
src/Source/LibJPEG/libjpeg.txt view on Meta::CPAN
The marker list remains stored until you call jpeg_finish_decompress or
jpeg_abort, at which point the memory is freed and the list is set to empty.
(jpeg_destroy also releases the storage, of course.)
Note that the library is internally interested in APP0 and APP14 markers;
if you try to set a small nonzero length limit on these types, the library
will silently force the length up to the minimum it wants. (But you can set
a zero length limit to prevent them from being saved at all.) Also, in a
16-bit environment, the maximum length limit may be constrained to less than
65533 by malloc() limitations. It is therefore best not to assume that the
effective length limit is exactly what you set it to be.
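For example (a minimal sketch; these calls go after creating the
decompression object and before jpeg_read_header()):
    jpeg_save_markers(&cinfo, JPEG_COM, 0xFFFF);       /* keep comment markers */
    jpeg_save_markers(&cinfo, JPEG_APP0 + 1, 0xFFFF);  /* e.g. Exif data in APP1 */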
If you want to supply your own marker-reading routine, you do it by calling
jpeg_set_marker_processor(). A marker processor routine must have the
signature
boolean jpeg_marker_parser_method (j_decompress_ptr cinfo)
Although the marker code is not explicitly passed, the routine can find it
in cinfo->unread_marker. At the time of call, the marker proper has been
read from the data source module. The processor routine is responsible for
src/Source/LibJPEG/rdtarga.c view on Meta::CPAN
/* State info for reading RLE-coded pixels; both counts must be init to 0 */
int block_count; /* # of pixels remaining in RLE block */
int dup_pixel_count; /* # of times to duplicate previous pixel */
/* This saves the correct pixel-row-expansion method for preload_image */
JMETHOD(JDIMENSION, get_pixel_rows, (j_compress_ptr cinfo,
cjpeg_source_ptr sinfo));
} tga_source_struct;
/* For expanding 5-bit pixel values to 8-bit with best rounding */
static const UINT8 c5to8bits[32] = {
0, 8, 16, 25, 33, 41, 49, 58,
66, 74, 82, 90, 99, 107, 115, 123,
132, 140, 148, 156, 165, 173, 181, 189,
197, 206, 214, 222, 230, 239, 247, 255
};
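/* (A note not from the original source: each entry above is i * 255 / 31
 * rounded to the nearest integer, i.e. (i * 255 + 15) / 31 in integer
 * arithmetic, mapping the 5-bit range 0..31 onto the full 8-bit range 0..255.) */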
src/Source/LibJPEG/structure.txt view on Meta::CPAN
This file provides an overview of the architecture of the IJG JPEG software;
that is, the functions of the various modules in the system and the interfaces
between modules. For more precise details about any data structure or calling
convention, see the include files and comments in the source code.
We assume that the reader is already somewhat familiar with the JPEG standard.
The README file includes references for learning about JPEG. The file
libjpeg.txt describes the library from the viewpoint of an application
programmer using the library; it's best to read that file before this one.
Also, the file coderules.txt describes the coding style conventions we use.
In this document, JPEG-specific terminology follows the JPEG standard:
A "component" means a color channel, e.g., Red or Luminance.
A "sample" is a single component value (i.e., one number in the image data).
A "coefficient" is a frequency coefficient (a DCT transform output number).
A "block" is an array of samples or coefficients.
An "MCU" (minimum coded unit) is an interleaved set of blocks of size
determined by the sampling factors, or a single block in a
noninterleaved scan.
src/Source/LibJPEG/structure.txt view on Meta::CPAN
* Optional color quantization (e.g., reduction to 256 colors).
* Optional color precision reduction (e.g., 24-bit to 15-bit color).
[This feature is not currently implemented.]
We also need overall control, marker parsing, and a data source module.
The support code (memory management & error handling) can be shared with
the compression half of the library.
There may be several implementations of each of these elements, particularly
in the decompressor, where a wide range of speed/quality tradeoffs is very
useful. It must be understood that some of the best speedups involve
merging adjacent steps in the pipeline. For example, upsampling, color space
conversion, and color quantization might all be done at once when using a
low-quality ordered-dither technique. The system architecture is designed to
allow such merging where appropriate.
Note: it is convenient to regard edge expansion (padding to block boundaries)
as a preprocessing/postprocessing function, even though the JPEG spec includes
it in compression/decompression. We do this because downsampling/upsampling
can be simplified a little if they work on padded data: it's not necessary to
src/Source/LibJPEG/transupp.h view on Meta::CPAN
* basic transformation.
*
* Transpose is the only transformation that can handle partial iMCUs at the
* right and bottom edges completely cleanly. flip_h can flip partial iMCUs
* at the bottom, but leaves any partial iMCUs at the right edge untouched.
* Similarly flip_v leaves any partial iMCUs at the bottom edge untouched.
* The other transforms are defined as combinations of these basic transforms
* and process edge blocks in a way that preserves the equivalence.
*
* The "trim" option causes untransformable partial iMCUs to be dropped;
* this is not strictly lossless, but it usually gives the best-looking
* result for odd-size images. Note that when this option is active,
* the expected mathematical equivalences between the transforms may not hold.
* (For example, -rot 270 -trim trims only the bottom edge, but -rot 90 -trim
* followed by -rot 180 -trim trims both edges.)
*
* We also offer a lossless-crop option, which discards data outside a given
* image region but losslessly preserves what is inside. Like the rotate and
* flip transforms, lossless crop is restricted by the current JPEG format: the
* upper left corner of the selected region must fall on an iMCU boundary. If
* this does not hold for the given crop parameters, we silently move the upper
src/Source/LibJPEG/usage.txt view on Meta::CPAN
one letter. Upper and lower case are equivalent (-BMP is the same as -bmp).
British spellings are also accepted (e.g., -greyscale), though for brevity
these are not mentioned below.
CJPEG DETAILS
The basic command line switches for cjpeg are:
-quality N[,...] Scale quantization tables to adjust image quality.
Quality is 0 (worst) to 100 (best); default is 75.
(See below for more info.)
-grayscale Create monochrome JPEG file from color input.
Be sure to use this switch when compressing a grayscale
BMP file, because cjpeg isn't bright enough to notice
whether a BMP file uses only shades of gray. By
saying -grayscale, you'll get a smaller JPEG file that
takes less time to process.
-rgb Create RGB JPEG file.
src/Source/LibJPEG/usage.txt view on Meta::CPAN
results of the floating-point method may vary slightly
across machines, while the integer methods should give
the same results everywhere. The fast integer method
is much less accurate than the other two.
-dither fs Use Floyd-Steinberg dithering in color quantization.
-dither ordered Use ordered dithering in color quantization.
-dither none Do not use dithering in color quantization.
By default, Floyd-Steinberg dithering is applied when
quantizing colors; this is slow but usually produces
the best results. Ordered dither is a compromise
between speed and quality; no dithering is fast but
usually looks awful. Note that these switches have
no effect unless color quantization is being done.
Ordered dither is only available in -onepass mode.
-map FILE Quantize to the colors used in the specified image
file. This is useful for producing multiple files
with identical color maps, or for forcing a predefined
set of colors to be used. The FILE must be a GIF
or PPM file. This option overrides -colors and
src/Source/LibJPEG/usage.txt view on Meta::CPAN
Color GIF files are not the ideal input for JPEG; JPEG is really intended for
compressing full-color (24-bit) images. In particular, don't try to convert
cartoons, line drawings, and other images that have only a few distinct
colors. GIF works great on these, JPEG does not. If you want to convert a
GIF to JPEG, you should experiment with cjpeg's -quality and -smooth options
to get a satisfactory conversion. -smooth 10 or so is often helpful.
Avoid running an image through a series of JPEG compression/decompression
cycles. Image quality loss will accumulate; after ten or so cycles the image
may be noticeably worse than it was after one cycle. It's best to use a
lossless format while manipulating an image, then convert to JPEG format when
you are ready to file the image away.
The -optimize option to cjpeg is worth using when you are making a "final"
version for posting or archiving. It's also a win when you are using low
quality settings to make very small JPEG files; the percentage improvement
is often a lot more than it is on larger files. (At present, -optimize
mode is always selected when generating progressive JPEG files.)
GIF input files are no longer supported, to avoid the Unisys LZW patent
src/Source/LibPNG/example.c view on Meta::CPAN
png_set_packswap(png_ptr);
/* Turn on interlace handling if you are not using png_write_image() */
if (interlacing != 0)
number_passes = png_set_interlace_handling(png_ptr);
else
number_passes = 1;
/* The easiest way to write the image (you may have a different memory
* layout, however, so choose what fits your needs best). You need to
* use the first method if you aren't handling interlacing yourself.
*/
png_uint_32 k, height, width;
/* In this example, "image" is a one-dimensional array of bytes */
png_byte image[height*width*bytes_per_pixel];
png_bytep row_pointers[height];
if (height > PNG_UINT_32_MAX/(sizeof (png_bytep)))
src/Source/LibPNG/libpng-manual.txt view on Meta::CPAN
png_uint_32 height = PNG_PASS_ROWS(image_height, pass_number);
Respectively these tell you the width and height of the sub-image
corresponding to the numbered pass. 'pass' is in the range 0 to 6 -
this can be confusing because the specification refers to the same passes
as 1 to 7! Be careful, you must check both the width and height before
calling png_read_rows() and not call it for that pass if either is zero.
You can, of course, read each sub-image row by row. If you want to
produce optimal code to make a pixel-by-pixel transformation of an
interlaced image this is the best approach; read each row of each pass,
transform it, and write it out to a new interlaced image.
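A minimal sketch of that per-pass loop (image_width, image_height, and the
actual row handling are assumed to come from your own code):
   png_uint_32 pass;
   for (pass = 0; pass < 7; ++pass)
   {
      png_uint_32 width = PNG_PASS_COLS(image_width, pass);
      png_uint_32 height = PNG_PASS_ROWS(image_height, pass);
      if (width == 0 || height == 0)
         continue; /* this pass is empty for a small image; do not read it */
      /* read 'height' rows of 'width' pixels with png_read_rows(),
         transform them, then write them to the output image */
   }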
If you want to de-interlace the image yourself libpng provides further
macros to help that tell you where to place the pixels in the output image.
Because the interlacing scheme is rectangular - sub-image pixels are always
arranged on a rectangular grid - all you need to know for each pass is the
starting column and row in the output image of the first pixel plus the
spacing between each pixel. As of libpng 1.5 there are four macros to
retrieve this information:
src/Source/LibPNG/libpng-manual.txt view on Meta::CPAN
library code itself needs to know about interactions between your
chunk and existing `intrinsic' chunks.
If you need to write a new intrinsic chunk, first read the PNG
specification. Acquire a first level of understanding of how it works.
Pay particular attention to the sections that describe chunk names,
and look at how other chunks were designed, so you can do things
similarly. Second, check out the sections of libpng that read and
write chunks. Try to find a chunk that is similar to yours and use
it as a template. More details can be found in the comments inside
the code. It is best to handle private or unknown chunks in a generic method,
via callback functions, instead of by modifying libpng functions. This
is illustrated in pngtest.c, which uses a callback function to handle a
private "vpAg" chunk and the new "sTER" chunk, which are both unknown to
libpng.
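A minimal sketch of the callback approach (read_chunk_callback is an assumed
application-side name; pngtest.c contains a complete, working version):
   static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp chunk)
   {
      /* chunk->name, chunk->data and chunk->size describe the chunk */
      if (memcmp(chunk->name, "vpAg", 4) == 0)
         return 1;      /* positive: handled here */
      return 0;         /* zero: not recognized; libpng keeps or discards it */
   }
   /* during setup, before the image is read: */
   png_set_read_user_chunk_fn(png_ptr, NULL, read_chunk_callback);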
If you wish to write your own transformation for the data, look through
the part of the code that does the transformations, and check out some of
the simpler ones to get an idea of how they work. Try to find a similar
transformation to the one you want to add and copy off of it. More details
can be found in the comments inside the code itself.
src/Source/LibPNG/libpng-manual.txt view on Meta::CPAN
Libpng-1.6.0 and libpng-1.6.1 wrote uncompressed iTXt chunks with the wrong
length, which resulted in PNG files that cannot be read beyond the bad iTXt
chunk. This error was fixed in libpng-1.6.3, and a tool (called
contrib/tools/png-fix-itxt) has been added to the libpng distribution.
XIII. Detecting libpng
The png_get_io_ptr() function has been present since libpng-0.88, has never
changed, and is unaffected by conditional compilation macros. It is the
best choice for use in configure scripts for detecting the presence of any
libpng version since 0.88. In an autoconf "configure.in" you could use
AC_CHECK_LIB(png, png_get_io_ptr, ...
XV. Source code repository
Since about February 2009, version 1.2.34, libpng has been under "git" source
control. The git repository was built from old libpng-x.y.z.tar.gz files
going back to version 0.70. You can access the git repository (read only)
at
src/Source/LibPNG/libpng.3 view on Meta::CPAN
png_uint_32 height = PNG_PASS_ROWS(image_height, pass_number);
Respectively these tell you the width and height of the sub-image
corresponding to the numbered pass. 'pass' is in the range 0 to 6 -
this can be confusing because the specification refers to the same passes
as 1 to 7! Be careful, you must check both the width and height before
calling png_read_rows() and not call it for that pass if either is zero.
You can, of course, read each sub-image row by row. If you want to
produce optimal code to make a pixel-by-pixel transformation of an
interlaced image this is the best approach; read each row of each pass,
transform it, and write it out to a new interlaced image.
If you want to de-interlace the image yourself libpng provides further
macros to help that tell you where to place the pixels in the output image.
Because the interlacing scheme is rectangular - sub-image pixels are always
arranged on a rectangular grid - all you need to know for each pass is the
starting column and row in the output image of the first pixel plus the
spacing between each pixel. As of libpng 1.5 there are four macros to
retrieve this information:
src/Source/LibPNG/libpng.3 view on Meta::CPAN
library code itself needs to know about interactions between your
chunk and existing `intrinsic' chunks.
If you need to write a new intrinsic chunk, first read the PNG
specification. Acquire a first level of understanding of how it works.
Pay particular attention to the sections that describe chunk names,
and look at how other chunks were designed, so you can do things
similarly. Second, check out the sections of libpng that read and
write chunks. Try to find a chunk that is similar to yours and use
it as a template. More details can be found in the comments inside
the code. It is best to handle private or unknown chunks in a generic method,
via callback functions, instead of by modifying libpng functions. This
is illustrated in pngtest.c, which uses a callback function to handle a
private "vpAg" chunk and the new "sTER" chunk, which are both unknown to
libpng.
If you wish to write your own transformation for the data, look through
the part of the code that does the transformations, and check out some of
the simpler ones to get an idea of how they work. Try to find a similar
transformation to the one you want to add and copy off of it. More details
can be found in the comments inside the code itself.
src/Source/LibPNG/libpng.3 view on Meta::CPAN
Libpng-1.6.0 and libpng-1.6.1 wrote uncompressed iTXt chunks with the wrong
length, which resulted in PNG files that cannot be read beyond the bad iTXt
chunk. This error was fixed in libpng-1.6.3, and a tool (called
contrib/tools/png-fix-itxt) has been added to the libpng distribution.
.SH XIII. Detecting libpng
The png_get_io_ptr() function has been present since libpng-0.88, has never
changed, and is unaffected by conditional compilation macros. It is the
best choice for use in configure scripts for detecting the presence of any
libpng version since 0.88. In an autoconf "configure.in" you could use
AC_CHECK_LIB(png, png_get_io_ptr, ...
.SH XV. Source code repository
Since about February 2009, version 1.2.34, libpng has been under "git" source
control. The git repository was built from old libpng-x.y.z.tar.gz files
going back to version 0.70. You can access the git repository (read only)
at
src/Source/LibPNG/png.c view on Meta::CPAN
* ( (red-y - blue-y) * (white-x - blue-x) -
* (red-x - blue-x) * (white-y - blue-y) ) / white-y
* -------------------------------------------------------------------------
* (green-x - blue-x)*(red-y - blue-y)-(green-y - blue-y)*(red-x - blue-x)
*
* Accuracy:
* The input values have 5 decimal digits of accuracy. The values are all in
* the range 0 < value < 1, so simple products are in the same range but may
* need up to 10 decimal digits to preserve the original precision and avoid
* underflow. Because we are using a 32-bit signed representation we cannot
* match this; the best is a little over 9 decimal digits, less than 10.
*
* The approach used here is to preserve the maximum precision within the
* signed representation. Because the red-scale calculation above uses the
* difference between two products of values that must be in the range -1..+1
* it is sufficient to divide the product by 7; ceil(100,000/32767*2). The
* factor is irrelevant in the calculation because it is applied to both
* numerator and denominator.
*
* Note that the values of the differences of the products of the
* chromaticities in the above equations tend to be small, for example for
src/Source/LibPNG/png.c view on Meta::CPAN
{
/* sRGB sets known gamma, end points and (from the chunk) intent. */
/* IMPORTANT: these are not necessarily the values found in an ICC profile
* because ICC profiles store values adapted to a D50 environment; it is
* expected that the ICC profile mediaWhitePointTag will be D50; see the
* checks and code elsewhere to understand this better.
*
* These XYZ values, which are accurate to 5dp, produce rgb to gray
* coefficients of (6968,23435,2366), which are reduced (because they add up
* to 32769 not 32768) to (6968,23434,2366). These are the values that
* libpng has traditionally used (and are the best values given the 15bit
* algorithm used by the rgb to gray code.)
*/
static const png_XYZ sRGB_XYZ = /* D65 XYZ (*not* the D50 adapted values!) */
{
/* color X Y Z */
/* red */ 41239, 21264, 1933,
/* green */ 35758, 71517, 11919,
/* blue */ 18048, 7219, 95053
};
src/Source/LibPNG/png.h view on Meta::CPAN
#ifdef PNG_READ_GAMMA_SUPPORTED
/* The threshold on gamma processing is configurable but hard-wired into the
* library. The following is the floating point variant.
*/
#define PNG_GAMMA_THRESHOLD (PNG_GAMMA_THRESHOLD_FIXED*.00001)
/* Handle gamma correction. Screen_gamma=(display_exponent).
* NOTE: this API simply sets the screen and file gamma values. It will
* therefore override the value for gamma in a PNG file if it is called after
* the file header has been read - use with care - call before reading the PNG
* file for best results!
*
* These routines accept the same gamma values as png_set_alpha_mode (described
* above). The PNG_GAMMA_ defines and PNG_DEFAULT_sRGB can be passed to either
* API (floating point or fixed.) Notice, however, that the 'file_gamma' value
* is the inverse of a 'screen gamma' value.
*/
PNG_FP_EXPORT(50, void, png_set_gamma, (png_structrp png_ptr,
double screen_gamma, double override_file_gamma))
PNG_FIXED_EXPORT(208, void, png_set_gamma_fixed, (png_structrp png_ptr,
png_fixed_point screen_gamma, png_fixed_point override_file_gamma))
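/* A usage sketch (application code, not part of this header): for a display
 * with a gamma of 2.2 and the conventional 1/2.2 (0.45455) default file
 * gamma, called before the PNG file is read:
 *
 *    png_set_gamma(png_ptr, 2.2, 0.45455);
 */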
src/Source/LibPNG/pngpriv.h view on Meta::CPAN
row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg3_neon,(png_row_infop
row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_neon,(png_row_infop
row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_neon,(png_row_infop
row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_neon,(png_row_infop
row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY);
/* Choose the best filter to use and filter the row data */
PNG_INTERNAL_FUNCTION(void,png_write_find_filter,(png_structrp png_ptr,
png_row_infop row_info),PNG_EMPTY);
#ifdef PNG_SEQUENTIAL_READ_SUPPORTED
PNG_INTERNAL_FUNCTION(void,png_read_IDAT_data,(png_structrp png_ptr,
png_bytep output, png_alloc_size_t avail_out),PNG_EMPTY);
/* Read 'avail_out' bytes of data from the IDAT stream. If the output buffer
* is NULL the function checks, instead, for the end of the stream. In this
* case a benign error will be issued if the stream end is not found or if
* extra data has to be consumed.
src/Source/LibPNG/pngrtran.c view on Meta::CPAN
(png_uint_32)(num_palette * (sizeof (png_byte))));
for (i = 0; i < num_palette; i++)
png_ptr->quantize_index[i] = (png_byte)i;
}
if (num_palette > maximum_colors)
{
if (histogram != NULL)
{
/* This is easy enough, just throw out the least used colors.
* Perhaps not the best solution, but good enough.
*/
int i;
/* Initialize an array to sort colors */
png_ptr->quantize_sort = (png_bytep)png_malloc(png_ptr,
(png_uint_32)(num_palette * (sizeof (png_byte))));
/* Initialize the quantize_sort array */
for (i = 0; i < num_palette; i++)
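The "throw out the least used colors" approach mentioned in the comment above can be sketched in isolation. This is not libpng code; the helper name, the plain exchange sort, and the unsigned short histogram type are assumptions for illustration only.
/* Order palette indices by descending histogram count; the caller keeps
 * only the first maximum_colors entries of 'order'. */
static void order_by_usage(unsigned char *order, int num_palette,
    const unsigned short *histogram)
{
    int i, j;
    for (i = 0; i < num_palette; i++)
        order[i] = (unsigned char)i;
    for (i = 0; i < num_palette - 1; i++)   /* simple O(n^2) exchange sort */
        for (j = i + 1; j < num_palette; j++)
            if (histogram[order[j]] > histogram[order[i]])
            {
                unsigned char t = order[i];
                order[i] = order[j];
                order[j] = t;
            }
}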
src/Source/LibPNG/pngrutil.c view on Meta::CPAN
# ifdef PNG_USE_ABS
pa = abs(p);
pb = abs(pc);
pc = abs(p + pc);
# else
pa = p < 0 ? -p : p;
pb = pc < 0 ? -pc : pc;
pc = (p + pc) < 0 ? -(p + pc) : p + pc;
# endif
/* Find the best predictor, the least of pa, pb, pc favoring the earlier
* ones in the case of a tie.
*/
if (pb < pa) pa = pb, a = b;
if (pc < pa) a = c;
/* Calculate the current pixel in a, and move the previous row pixel to c
* for the next time round the loop
*/
c = b;
a += *row;
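In the surrounding function, p and pc are set to b - c and a - c just before this point, so pa, pb, and pc above are |b-c|, |a-c|, and |a+b-2c| - the same quantities the PNG specification's Paeth predictor produces. The following standalone sketch (not the libpng loop itself) shows that textbook form.
#include <stdlib.h> /* abs */
/* a = left, b = above, c = upper-left; return whichever of the three is
 * closest to a + b - c, preferring a, then b, then c on ties. */
static int paeth_predictor(int a, int b, int c)
{
    int p  = a + b - c;
    int pa = abs(p - a);
    int pb = abs(p - b);
    int pc = abs(p - c);
    if (pa <= pb && pa <= pc)
        return a;
    if (pb <= pc)
        return b;
    return c;
}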
src/Source/LibPNG/pngtest.c view on Meta::CPAN
* the basic chunk handling, filtering, and (de)compression code is working
* properly. It does not currently test all of the transforms, although
* it probably should.
*
* The program will report "FAIL" in certain legitimate cases:
* 1) when the compression level or filter selection method is changed.
* 2) when the maximum IDAT size (PNG_ZBUF_SIZE in pngconf.h) is not 8192.
* 3) when unknown unsafe-to-copy ancillary chunks or unknown critical chunks
* exist in the input file.
* 4) others not listed here...
* In these cases, it is best to check with another tool such as "pngcheck"
* to see what the differences between the two files are.
*
* If a filename is given on the command-line, then this file is used
* for the input, rather than the default "pngtest.png". This allows
* testing a wide variety of files easily. You can also test a number
* of files at once by typing "pngtest -m file1.png file2.png ..."
*/
#define _POSIX_SOURCE 1
src/Source/LibPNG/pngwrite.c view on Meta::CPAN
if (png_ptr->do_filter == PNG_NO_FILTERS)
#endif /* WRITE_FILTER */
png_ptr->do_filter = PNG_FILTER_NONE;
}
}
else
png_error(png_ptr, "Unknown custom filter method");
}
/* This allows us to influence the way in which libpng chooses the "best"
* filter for the current scanline. While the "minimum-sum-of-absolute-
* differences" metric is relatively fast and effective, there is some
* question as to whether it can be improved upon by trying to keep the
* filtered data going to zlib more consistent, hopefully resulting in
* better compression.
*/
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED /* GRR 970116 */
/* Convenience reset API. */
static void
png_reset_filter_heuristics(png_structrp png_ptr)
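The simplest way for an application to influence the filter choice is png_set_filter(); the weighted-heuristics routines that follow are the heavier-weight mechanism the comment above introduces. A minimal write-side sketch, assuming a png_ptr from png_create_write_struct; the restriction to None and Sub is purely illustrative.
/* Let the selection heuristic consider only the None and Sub filters;
 * PNG_FILTER_TYPE_BASE (0) is the standard filter method. */
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE,
    PNG_FILTER_NONE | PNG_FILTER_SUB);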
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
png_write_filtered_row(png_structrp png_ptr, png_bytep filtered_row,
png_size_t row_bytes);
#define PNG_MAXSUM (((png_uint_32)(-1)) >> 1)
#define PNG_HISHIFT 10
#define PNG_LOMASK ((png_uint_32)0xffffL)
#define PNG_HIMASK ((png_uint_32)(~PNG_LOMASK >> PNG_HISHIFT))
void /* PRIVATE */
png_write_find_filter(png_structrp png_ptr, png_row_infop row_info)
{
png_bytep best_row;
#ifdef PNG_WRITE_FILTER_SUPPORTED
png_bytep prev_row, row_buf;
png_uint_32 mins, bpp;
png_byte filter_to_do = png_ptr->do_filter;
png_size_t row_bytes = row_info->rowbytes;
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED
int num_p_filters = png_ptr->num_prev_filters;
#endif
png_debug(1, "in png_write_find_filter");
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
/* These will never be selected so we need not test them. */
filter_to_do &= ~(PNG_FILTER_UP | PNG_FILTER_PAETH);
}
#endif
/* Find out how many bytes offset each pixel is */
bpp = (row_info->pixel_depth + 7) >> 3;
prev_row = png_ptr->prev_row;
#endif
best_row = png_ptr->row_buf;
#ifdef PNG_WRITE_FILTER_SUPPORTED
row_buf = best_row;
mins = PNG_MAXSUM;
/* The prediction method we use is to find which method provides the
* smallest value when summing the absolute values of the distances
* from zero, using anything >= 128 as negative numbers. This is known
* as the "minimum sum of absolute differences" heuristic. Other
* heuristics are the "weighted minimum sum of absolute differences"
* (experimental and can in theory improve compression), and the "zlib
* predictive" method (not implemented yet), which does test compressions
* of lines using different filter methods, and then chooses the
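For reference, the "minimum sum of absolute differences" score described above amounts to the following per-row computation. This is a standalone sketch (an assumed helper, not libpng's in-place accumulation), treating each filtered byte as signed so values of 128 and above count as negative.
#include <stddef.h>
static unsigned long msad_score(const unsigned char *filtered_row, size_t n)
{
    unsigned long sum = 0;
    size_t i;
    for (i = 0; i < n; i++)
    {
        unsigned int v = filtered_row[i];
        sum += (v < 128) ? v : 256 - v;  /* |value as signed byte| */
    }
    return sum;  /* the filter producing the smallest score is chosen */
}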
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
{
*dp = *rp;
}
for (lp = row_buf + 1; i < row_bytes;
i++, rp++, lp++, dp++)
{
*dp = (png_byte)(((int)*rp - (int)*lp) & 0xff);
}
best_row = png_ptr->sub_row;
}
else if ((filter_to_do & PNG_FILTER_SUB) != 0)
{
png_bytep rp, dp, lp;
png_uint_32 sum = 0, lmins = mins;
png_size_t i;
int v;
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
sum = PNG_MAXSUM;
else
sum = (sumhi << PNG_HISHIFT) + sumlo;
}
#endif
if (sum < mins)
{
mins = sum;
best_row = png_ptr->sub_row;
}
}
/* Up filter */
if (filter_to_do == PNG_FILTER_UP)
{
png_bytep rp, dp, pp;
png_size_t i;
for (i = 0, rp = row_buf + 1, dp = png_ptr->up_row + 1,
pp = prev_row + 1; i < row_bytes;
i++, rp++, pp++, dp++)
{
*dp = (png_byte)(((int)*rp - (int)*pp) & 0xff);
}
best_row = png_ptr->up_row;
}
else if ((filter_to_do & PNG_FILTER_UP) != 0)
{
png_bytep rp, dp, pp;
png_uint_32 sum = 0, lmins = mins;
png_size_t i;
int v;
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
sum = PNG_MAXSUM;
else
sum = (sumhi << PNG_HISHIFT) + sumlo;
}
#endif
if (sum < mins)
{
mins = sum;
best_row = png_ptr->up_row;
}
}
/* Avg filter */
if (filter_to_do == PNG_FILTER_AVG)
{
png_bytep rp, dp, pp, lp;
png_uint_32 i;
for (i = 0, rp = row_buf + 1, dp = png_ptr->avg_row + 1,
pp = prev_row + 1; i < bpp; i++)
{
*dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff);
}
for (lp = row_buf + 1; i < row_bytes; i++)
{
*dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2))
& 0xff);
}
best_row = png_ptr->avg_row;
}
else if ((filter_to_do & PNG_FILTER_AVG) != 0)
{
png_bytep rp, dp, pp, lp;
png_uint_32 sum = 0, lmins = mins;
png_size_t i;
int v;
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
sum = PNG_MAXSUM;
else
sum = (sumhi << PNG_HISHIFT) + sumlo;
}
#endif
if (sum < mins)
{
mins = sum;
best_row = png_ptr->avg_row;
}
}
/* Paeth filter */
if (filter_to_do == PNG_FILTER_PAETH)
{
png_bytep rp, dp, pp, cp, lp;
png_size_t i;
for (i = 0, rp = row_buf + 1, dp = png_ptr->paeth_row + 1,
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
#else
pa = p < 0 ? -p : p;
pb = pc < 0 ? -pc : pc;
pc = (p + pc) < 0 ? -(p + pc) : p + pc;
#endif
p = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
*dp++ = (png_byte)(((int)*rp++ - p) & 0xff);
}
best_row = png_ptr->paeth_row;
}
else if ((filter_to_do & PNG_FILTER_PAETH) != 0)
{
png_bytep rp, dp, pp, cp, lp;
png_uint_32 sum = 0, lmins = mins;
png_size_t i;
int v;
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED
src/Source/LibPNG/pngwutil.c view on Meta::CPAN
if (sumhi > PNG_HIMASK)
sum = PNG_MAXSUM;
else
sum = (sumhi << PNG_HISHIFT) + sumlo;
}
#endif
if (sum < mins)
{
best_row = png_ptr->paeth_row;
}
}
#endif /* WRITE_FILTER */
/* Do the actual writing of the filtered row data from the chosen filter. */
png_write_filtered_row(png_ptr, best_row, row_info->rowbytes+1);
#ifdef PNG_WRITE_FILTER_SUPPORTED
#ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED
/* Save the type of filter we picked this time for future calculations */
if (png_ptr->num_prev_filters > 0)
{
int j;
for (j = 1; j < num_p_filters; j++)
{
png_ptr->prev_filters[j] = png_ptr->prev_filters[j - 1];
}
png_ptr->prev_filters[j] = best_row[0];
}
#endif
#endif /* WRITE_FILTER */
}
/* Do the actual writing of a previously filtered row. */
static void
png_write_filtered_row(png_structrp png_ptr, png_bytep filtered_row,
png_size_t full_row_length/*includes filter byte*/)
src/Source/LibRawLite/dcraw/dcraw.1.html view on Meta::CPAN
<B>| pamsumm -max</B>
<DT><B></B>
<DD>
The default darkness and saturation are usually correct.
<DT><B>-n noise_threshold</B>
<DD>
Use wavelets to erase noise while preserving real detail.
The best threshold should be somewhere between 100 and 1000.
<DT><B>-C red_mag blue_mag</B>
<DD>
Enlarge the raw red and blue layers by the given factors,
typically 0.999 to 1.001, to correct chromatic aberration.
<DT><B>-H 0</B>
<DD>
Clip all highlights to solid white (default).
<DT><B>-H 1</B>
src/Source/LibTIFF4/ChangeLog view on Meta::CPAN
2007-10-24 Joris Van Damme <joris.at.lebbeke@skynet.be>
* tif_dirread.c: Fixed problem with bogus file triggering
assert(td->td_planarconfig == PLANARCONFIG_CONTIG) in
ChopUpSingleUncompressedStrip
2007-10-22 Joris Van Damme <joris.at.lebbeke@skynet.be>
* tif_jpeg.c: Resolved buffer incrementation bug that led to faulty images
at best, access violation at worst, when subsampled JPEG compressed imagery
is decoded without the JPEG_COLORMODE feature
2007-10-11 Frank Warmerdam <warmerdam@pobox.com>
* html/index.html: Update "people responsible" section.
2007-10-05 Frank Warmerdam <warmerdam@pobox.com>
* tools/tiff2pdf.c: Fix problem with alpha setting in some cases
as reported on the mailing list.