Bug 755869 - [1] Update Skia to r4037 r=mattwoodrow
parent dbf40c6c8c
commit e03d6fcfc5
@@ -2,4 +2,4 @@ The source from this directory was copied from the skia subversion trunk
using the update.sh script. The changes made were those applied by update.sh,
the addition/update of Makefile.in files for the Mozilla build system.

The subversion revision used was r2980.
The subversion revision used was r4037.
@@ -86,6 +86,15 @@
//#define SK_CPU_BENDIAN
//#define SK_CPU_LENDIAN

/* Most compilers use the same bit endianness for bit flags in a byte as the
system byte endianness, and this is the default. If for some reason this
needs to be overridden, specify which of the mutually exclusive flags to
use. For example, some atom processors in certain configurations have big
endian byte order but little endian bit orders.
*/
//#define SK_UINT8_BITFIELD_BENDIAN
//#define SK_UINT8_BITFIELD_LENDIAN


/* Some compilers don't support long long for 64bit integers. If yours does
not, define this to the appropriate type.
@@ -112,9 +121,11 @@

/* If zlib is available and you want to support the flate compression
algorithm (used in PDF generation), define SK_ZLIB_INCLUDE to be the
include path.
include path. Alternatively, define SK_SYSTEM_ZLIB to use the system zlib
library specified as "#include <zlib.h>".
*/
//#define SK_ZLIB_INCLUDE <zlib.h>
//#define SK_SYSTEM_ZLIB

/* Define this to allow PDF scalars above 32k. The PDF/A spec doesn't allow
them, but modern PDF interpreters should handle them just fine.
@@ -145,10 +156,6 @@
//#define SK_SUPPORT_UNITTEST
#endif

/* Don't dither 32bit gradients, to match what the canvas test suite expects.
*/
#define SK_DISABLE_DITHER_32BIT_GRADIENT

/* If your system embeds skia and has complex event logging, define this
symbol to name a file that maps the following macros to your system's
equivalents:
@@ -170,10 +177,4 @@
#define SK_A32_SHIFT 24
#endif

/* Don't include stdint.h on windows as it conflicts with our build system.
*/
#ifdef SK_BUILD_FOR_WIN32
#define SK_IGNORE_STDINT_DOT_H
#endif

#endif
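For orientation (not part of the patch): the knobs touched in the SkUserConfig.h hunks above are ordinary preprocessor defines that an embedder sets in its own copy of that header. A minimal, purely illustrative sketch of such a fragment, assuming a little-endian target that wants to use the system zlib, might look like:

```cpp
// Hypothetical SkUserConfig.h fragment -- illustrative values only,
// not the configuration this patch actually ships.

// CPU byte order: define exactly one of these.
//#define SK_CPU_BENDIAN
#define SK_CPU_LENDIAN

// Bit-field bit order, only if the compiler's default must be overridden;
// the two flags are mutually exclusive (see the comment block above).
//#define SK_UINT8_BITFIELD_BENDIAN
//#define SK_UINT8_BITFIELD_LENDIAN

// Use the system zlib for PDF flate compression rather than pointing
// SK_ZLIB_INCLUDE at a bundled header.
#define SK_SYSTEM_ZLIB
```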
@@ -34,7 +34,7 @@ public:
kCFF_Font,
kTrueType_Font,
kOther_Font,
kNotEmbeddable_Font
kNotEmbeddable_Font,
};
// The type of the underlying font program. This field determines which
// of the following fields are valid. If it is kOther_Font or
@@ -56,7 +56,7 @@ public:
kItalic_Style = 0x00040,
kAllCaps_Style = 0x10000,
kSmallCaps_Style = 0x20000,
kForceBold_Style = 0x40000
kForceBold_Style = 0x40000,
};
uint16_t fStyle; // Font style characteristics.
int16_t fItalicAngle; // Counterclockwise degrees from vertical of the
@@ -75,7 +75,7 @@ public:
kHAdvance_PerGlyphInfo = 0x1, // Populate horizontal advance data.
kVAdvance_PerGlyphInfo = 0x2, // Populate vertical advance data.
kGlyphNames_PerGlyphInfo = 0x4, // Populate glyph names (Type 1 only).
kToUnicode_PerGlyphInfo = 0x8 // Populate ToUnicode table, ignored
kToUnicode_PerGlyphInfo = 0x8, // Populate ToUnicode table, ignored
// for Type 1 fonts
};

@@ -84,7 +84,7 @@ public:
enum MetricType {
kDefault, // Default advance: fAdvance.count = 1
kRange, // Advances for a range: fAdvance.count = fEndID-fStartID
kRun // fStartID-fEndID have same advance: fAdvance.count = 1
kRun, // fStartID-fEndID have same advance: fAdvance.count = 1
};
MetricType fType;
uint16_t fStartId;
@@ -16,6 +16,7 @@
#include "SkRefCnt.h"

struct SkIRect;
struct SkRect;
class SkColorTable;
class SkPaint;
class SkPixelRef;
@@ -63,16 +64,21 @@ public:
kConfigCount
};

/** Default construct creates a bitmap with zero width and height, and no pixels.
Its config is set to kNo_Config.
*/
/**
* Default construct creates a bitmap with zero width and height, and no pixels.
* Its config is set to kNo_Config.
*/
SkBitmap();
/** Constructor initializes the new bitmap by copying the src bitmap. All fields are copied,
but ownership of the pixels remains with the src bitmap.
*/

/**
* Copy the settings from the src into this bitmap. If the src has pixels
* allocated, they will be shared, not copied, so that the two bitmaps will
* reference the same memory for the pixels. If a deep copy is needed,
* where the new bitmap has its own separate copy of the pixels, use
* deepCopyTo().
*/
SkBitmap(const SkBitmap& src);
/** Decrements our (shared) pixel ownership if needed.
*/

~SkBitmap();

/** Copies the src bitmap into this bitmap. Ownership of the src bitmap's pixels remains
@@ -218,6 +224,12 @@ public:
static Sk64 ComputeSize64(Config, int width, int height);
static size_t ComputeSize(Config, int width, int height);

/**
* Return the bitmap's bounds [0, 0, width, height] as an SkRect
*/
void getBounds(SkRect* bounds) const;
void getBounds(SkIRect* bounds) const;

/** Set the bitmap's config and dimensions. If rowBytes is 0, then
ComputeRowBytes() is called to compute the optimal value. This resets
any pixel/colortable ownership, just like reset().
@@ -36,20 +36,13 @@ public:
const SkPMColor* src,
int count, U8CPU alpha, int x, int y);

/** Function pointer that blends a single color with a row of 32-bit colors
onto a 32-bit destination
*/
typedef void (*ColorProc)(SkPMColor* dst, const SkPMColor* src, int count,
SkPMColor color);

//! Public entry-point to return a blit function ptr
static Proc Factory(unsigned flags, SkBitmap::Config);

///////////// D32 version

enum Flags32 {
kGlobalAlpha_Flag32 = 1 << 0,
kSrcPixelAlpha_Flag32 = 1 << 1
kSrcPixelAlpha_Flag32 = 1 << 1,
};

/** Function pointer that blends 32bit colors onto a 32bit destination.
@@ -64,6 +57,12 @@ public:

static Proc32 Factory32(unsigned flags32);

/** Function pointer that blends a single color with a row of 32-bit colors
onto a 32-bit destination
*/
typedef void (*ColorProc)(SkPMColor* dst, const SkPMColor* src, int count,
SkPMColor color);

/** Blend a single color onto a row of S32 pixels, writing the result
into a row of D32 pixels. src and dst may be the same memory, but
if they are not, they may not overlap.
@@ -71,8 +70,20 @@ public:
static void Color32(SkPMColor dst[], const SkPMColor src[],
int count, SkPMColor color);

//! Public entry-point to return a blit function ptr
static ColorProc ColorProcFactory();

/** Function pointer that blends a single color onto a 32-bit rectangle. */
typedef void (*ColorRectProc)(SkPMColor* dst, int width, int height,
size_t rowBytes, SkPMColor color);

/** Blend a single color into a rectangle of D32 pixels. */
static void ColorRect32(SkPMColor* dst, int width, int height,
size_t rowBytes, SkPMColor color);

//! Public entry-point to return a blit function ptr
static ColorRectProc ColorRectProcFactory();

/** These static functions are called by the Factory and Factory32
functions, and should return either NULL, or a
platform-specific function-ptr to be used in place of the
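As a usage note (not part of the patch): the ColorRectProc/ColorRect32 entry points added above blend one color into a block of D32 pixels. A hedged sketch of how calling code might use them, assuming the standard SkColorSetARGB/SkPreMultiplyColor helpers from SkColor.h:

```cpp
#include "SkBlitRow.h"
#include "SkColor.h"

// Illustrative only: tint an ARGB_8888 buffer with 50%-opaque red using
// the new static entry point declared in the diff above.
static void TintBufferRed(SkPMColor* pixels, int width, int height,
                          size_t rowBytes) {
    SkPMColor red = SkPreMultiplyColor(SkColorSetARGB(0x80, 0xFF, 0x00, 0x00));
    SkBlitRow::ColorRect32(pixels, width, height, rowBytes, red);
}
```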
@ -61,6 +61,11 @@ public:
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Trigger the immediate execution of all pending draw operations.
|
||||
*/
|
||||
void flush();
|
||||
|
||||
/**
|
||||
* Return the width/height of the underlying device. The current drawable
|
||||
* area may be small (due to clipping or saveLayer). For a canvas with
|
||||
@ -78,7 +83,7 @@ public:
|
||||
reference count is incremented. If the canvas was already holding a
|
||||
device, its reference count is decremented. The new device is returned.
|
||||
*/
|
||||
SkDevice* setDevice(SkDevice* device);
|
||||
virtual SkDevice* setDevice(SkDevice* device);
|
||||
|
||||
/**
|
||||
* saveLayer() can create another device (which is later drawn onto
|
||||
@ -86,8 +91,14 @@ public:
|
||||
* installed. Note that this can change on other calls like save/restore,
|
||||
* so do not access this device after subsequent canvas calls.
|
||||
* The reference count of the device is not changed.
|
||||
*
|
||||
* @param updateMatrixClip If this is true, then before the device is
|
||||
* returned, we ensure that its has been notified about the current
|
||||
* matrix and clip. Note: this happens automatically when the device
|
||||
* is drawn to, but is optional here, as there is a small perf hit
|
||||
* sometimes.
|
||||
*/
|
||||
SkDevice* getTopDevice() const;
|
||||
SkDevice* getTopDevice(bool updateMatrixClip = false) const;
|
||||
|
||||
/**
|
||||
* Create a new raster device and make it current. This also returns
|
||||
@ -99,7 +110,7 @@ public:
|
||||
* Shortcut for getDevice()->createCompatibleDevice(...).
|
||||
* If getDevice() == NULL, this method does nothing, and returns NULL.
|
||||
*/
|
||||
SkDevice* createCompatibleDevice(SkBitmap::Config config,
|
||||
SkDevice* createCompatibleDevice(SkBitmap::Config config,
|
||||
int width, int height,
|
||||
bool isOpaque);
|
||||
|
||||
@ -137,7 +148,7 @@ public:
|
||||
* low byte to high byte: R, G, B, A.
|
||||
*/
|
||||
kRGBA_Premul_Config8888,
|
||||
kRGBA_Unpremul_Config8888
|
||||
kRGBA_Unpremul_Config8888,
|
||||
};
|
||||
|
||||
/**
|
||||
@ -152,7 +163,7 @@ public:
|
||||
* kARGB_8888_Config as SkPMColor
|
||||
*
|
||||
* If the bitmap has pixels already allocated, the canvas pixels will be
|
||||
* written there. If not, bitmap->allocPixels() will be called
|
||||
* written there. If not, bitmap->allocPixels() will be called
|
||||
* automatically. If the bitmap is backed by a texture readPixels will
|
||||
* fail.
|
||||
*
|
||||
@ -290,7 +301,7 @@ public:
|
||||
/** Returns true if drawing is currently going to a layer (from saveLayer)
|
||||
* rather than to the root device.
|
||||
*/
|
||||
bool isDrawingToLayer() const;
|
||||
virtual bool isDrawingToLayer() const;
|
||||
|
||||
/** Preconcat the current matrix with the specified translation
|
||||
@param dx The distance to translate in X
|
||||
@ -424,7 +435,16 @@ public:
|
||||
@return true if the horizontal band is completely clipped out (i.e. does
|
||||
not intersect the current clip)
|
||||
*/
|
||||
bool quickRejectY(SkScalar top, SkScalar bottom, EdgeType et) const;
|
||||
bool quickRejectY(SkScalar top, SkScalar bottom, EdgeType et) const {
|
||||
SkASSERT(SkScalarToCompareType(top) <= SkScalarToCompareType(bottom));
|
||||
const SkRectCompareType& clipR = this->getLocalClipBoundsCompareType(et);
|
||||
// In the case where the clip is empty and we are provided with a
|
||||
// negative top and positive bottom parameter then this test will return
|
||||
// false even though it will be clipped. We have chosen to exclude that
|
||||
// check as it is rare and would result double the comparisons.
|
||||
return SkScalarToCompareType(top) >= clipR.fBottom
|
||||
|| SkScalarToCompareType(bottom) <= clipR.fTop;
|
||||
}
|
||||
|
||||
/** Return the bounds of the current clip (in local coordinates) in the
|
||||
bounds parameter, and return true if it is non-empty. This can be useful
|
||||
@ -438,7 +458,7 @@ public:
|
||||
then taking its bounds.
|
||||
*/
|
||||
bool getClipDeviceBounds(SkIRect* bounds) const;
|
||||
|
||||
|
||||
|
||||
/** Fill the entire canvas' bitmap (restricted to the current clip) with the
|
||||
specified ARGB color, using the specified mode.
|
||||
@ -859,27 +879,28 @@ public:
|
||||
ClipType getClipType() const;
|
||||
|
||||
/** Return the current device clip (concatenation of all clip calls).
|
||||
This does not account for the translate in any of the devices.
|
||||
@return the current device clip (concatenation of all clip calls).
|
||||
*/
|
||||
* This does not account for the translate in any of the devices.
|
||||
* @return the current device clip (concatenation of all clip calls).
|
||||
*
|
||||
* DEPRECATED -- call getClipDeviceBounds() instead.
|
||||
*/
|
||||
const SkRegion& getTotalClip() const;
|
||||
|
||||
/**
|
||||
* Return true if the current clip is non-empty.
|
||||
*
|
||||
* If bounds is not NULL, set it to the bounds of the current clip
|
||||
* in global coordinates.
|
||||
*/
|
||||
bool getTotalClipBounds(SkIRect* bounds) const;
|
||||
|
||||
/**
|
||||
* Return the current clipstack. This mirrors the result in getTotalClip()
|
||||
* but is represented as a stack of geometric clips + region-ops.
|
||||
*/
|
||||
const SkClipStack& getTotalClipStack() const;
|
||||
|
||||
void setExternalMatrix(const SkMatrix* = NULL);
|
||||
|
||||
class ClipVisitor {
|
||||
public:
|
||||
virtual void clipRect(const SkRect&, SkRegion::Op, bool antialias) = 0;
|
||||
virtual void clipPath(const SkPath&, SkRegion::Op, bool antialias) = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* Replays the clip operations, back to front, that have been applied to
|
||||
* the canvas, calling the appropriate method on the visitor for each
|
||||
* clip. All clips have already been transformed into device space.
|
||||
*/
|
||||
void replayClips(ClipVisitor*) const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/** After calling saveLayer(), there can be any number of devices that make
|
||||
@ -921,9 +942,20 @@ public:
|
||||
};
|
||||
|
||||
protected:
|
||||
// Returns the canvas to be used by DrawIter. Default implementation
|
||||
// returns this. Subclasses that encapsulate an indirect canvas may
|
||||
// need to overload this method. The impl must keep track of this, as it
|
||||
// is not released or deleted by the caller.
|
||||
virtual SkCanvas* canvasForDrawIter();
|
||||
|
||||
// all of the drawBitmap variants call this guy
|
||||
virtual void commonDrawBitmap(const SkBitmap&, const SkIRect*,
|
||||
const SkMatrix&, const SkPaint& paint);
|
||||
void commonDrawBitmap(const SkBitmap&, const SkIRect*, const SkMatrix&,
|
||||
const SkPaint& paint);
|
||||
|
||||
// Clip rectangle bounds. Called internally by saveLayer.
|
||||
// returns false if the entire rectangle is entirely clipped out
|
||||
bool clipRectBounds(const SkRect* bounds, SaveFlags flags,
|
||||
SkIRect* intersection);
|
||||
|
||||
private:
|
||||
class MCRec;
|
||||
@ -937,7 +969,7 @@ private:
|
||||
|
||||
SkBounder* fBounder;
|
||||
SkDevice* fLastDeviceToGainFocus;
|
||||
int fLayerCount; // number of successful saveLayer calls
|
||||
int fSaveLayerCount; // number of successful saveLayer calls
|
||||
|
||||
void prepareForDeviceDraw(SkDevice*, const SkMatrix&, const SkRegion&,
|
||||
const SkClipStack& clipStack);
|
||||
@ -946,8 +978,9 @@ private:
|
||||
void updateDeviceCMCache();
|
||||
|
||||
friend class SkDrawIter; // needs setupDrawForLayerDevice()
|
||||
friend class AutoDrawLooper;
|
||||
|
||||
SkDevice* createLayerDevice(SkBitmap::Config, int width, int height,
|
||||
SkDevice* createLayerDevice(SkBitmap::Config, int width, int height,
|
||||
bool isOpaque);
|
||||
|
||||
SkDevice* init(SkDevice*);
|
||||
@ -961,9 +994,10 @@ private:
|
||||
void internalDrawBitmapNine(const SkBitmap& bitmap, const SkIRect& center,
|
||||
const SkRect& dst, const SkPaint* paint);
|
||||
void internalDrawPaint(const SkPaint& paint);
|
||||
int internalSaveLayer(const SkRect* bounds, const SkPaint* paint,
|
||||
SaveFlags, bool justForImageFilter);
|
||||
void internalDrawDevice(SkDevice*, int x, int y, const SkPaint*);
|
||||
|
||||
|
||||
void drawDevice(SkDevice*, int x, int y, const SkPaint*);
|
||||
// shared by save() and saveLayer()
|
||||
int internalSave(SaveFlags flags);
|
||||
void internalRestore();
|
||||
|
@ -17,18 +17,12 @@ public:
|
||||
SkChunkAlloc(size_t minSize);
|
||||
~SkChunkAlloc();
|
||||
|
||||
/** Free up all allocated blocks. This invalidates all returned
|
||||
pointers.
|
||||
*/
|
||||
/**
|
||||
* Free up all allocated blocks. This invalidates all returned
|
||||
* pointers.
|
||||
*/
|
||||
void reset();
|
||||
|
||||
/** Reuse all allocated blocks. This invalidates all returned
|
||||
pointers (like reset) but doesn't necessarily free up all
|
||||
of the privately allocated blocks. This is more efficient
|
||||
if you plan to reuse the allocator multiple times.
|
||||
*/
|
||||
void reuse();
|
||||
|
||||
enum AllocFailType {
|
||||
kReturnNil_AllocFailType,
|
||||
kThrow_AllocFailType
|
||||
@ -48,6 +42,7 @@ public:
|
||||
size_t unalloc(void* ptr);
|
||||
|
||||
size_t totalCapacity() const { return fTotalCapacity; }
|
||||
int blockCount() const { return fBlockCount; }
|
||||
|
||||
/**
|
||||
* Returns true if the specified address is within one of the chunks, and
|
||||
@ -58,10 +53,12 @@ public:
|
||||
|
||||
private:
|
||||
struct Block;
|
||||
|
||||
Block* fBlock;
|
||||
size_t fMinSize;
|
||||
Block* fPool;
|
||||
size_t fChunkSize;
|
||||
size_t fTotalCapacity;
|
||||
int fBlockCount;
|
||||
|
||||
Block* newBlock(size_t bytes, AllocFailType ftype);
|
||||
};
|
||||
|
@ -18,7 +18,7 @@ class SK_API SkClipStack {
|
||||
public:
|
||||
SkClipStack();
|
||||
SkClipStack(const SkClipStack& b);
|
||||
~SkClipStack() {}
|
||||
~SkClipStack();
|
||||
|
||||
SkClipStack& operator=(const SkClipStack& b);
|
||||
bool operator==(const SkClipStack& b) const;
|
||||
@ -30,8 +30,7 @@ public:
|
||||
void save();
|
||||
void restore();
|
||||
|
||||
void clipDevRect(const SkIRect& ir,
|
||||
SkRegion::Op op = SkRegion::kIntersect_Op) {
|
||||
void clipDevRect(const SkIRect& ir, SkRegion::Op op) {
|
||||
SkRect r;
|
||||
r.set(ir);
|
||||
this->clipDevRect(r, op, false);
|
||||
@ -49,7 +48,8 @@ public:
|
||||
B2FIter(const SkClipStack& stack);
|
||||
|
||||
struct Clip {
|
||||
Clip() : fRect(NULL), fPath(NULL), fOp(SkRegion::kIntersect_Op) {}
|
||||
Clip() : fRect(NULL), fPath(NULL), fOp(SkRegion::kIntersect_Op),
|
||||
fDoAA(false) {}
|
||||
friend bool operator==(const Clip& a, const Clip& b);
|
||||
friend bool operator!=(const Clip& a, const Clip& b);
|
||||
const SkRect* fRect; // if non-null, this is a rect clip
|
||||
|
@ -118,7 +118,7 @@ public:
|
||||
*/
|
||||
static SkColorFilter* CreateLightingFilter(SkColor mul, SkColor add);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
protected:
|
||||
SkColorFilter() {}
|
||||
SkColorFilter(SkFlattenableReadBuffer& rb) : INHERITED(rb) {}
|
||||
@ -143,13 +143,13 @@ public:
|
||||
virtual void beginSession();
|
||||
virtual void endSession();
|
||||
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkFilterShader)
|
||||
|
||||
protected:
|
||||
SkFilterShader(SkFlattenableReadBuffer& );
|
||||
virtual void flatten(SkFlattenableWriteBuffer& ) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkFilterShader, (buffer)); }
|
||||
SkShader* fShader;
|
||||
SkColorFilter* fFilter;
|
||||
|
||||
|
@ -215,6 +215,68 @@ static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
|
||||
(g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract 4-byte interpolation, implemented on top of SkPMColor
|
||||
* utility functions. Third parameter controls blending of the first two:
|
||||
* (src, dst, 0) returns dst
|
||||
* (src, dst, 0xFF) returns src
|
||||
* srcWeight is [0..256], unlike SkFourByteInterp which takes [0..255]
|
||||
*/
|
||||
static inline SkPMColor SkFourByteInterp256(SkPMColor src, SkPMColor dst,
|
||||
unsigned scale) {
|
||||
unsigned a = SkAlphaBlend(SkGetPackedA32(src), SkGetPackedA32(dst), scale);
|
||||
unsigned r = SkAlphaBlend(SkGetPackedR32(src), SkGetPackedR32(dst), scale);
|
||||
unsigned g = SkAlphaBlend(SkGetPackedG32(src), SkGetPackedG32(dst), scale);
|
||||
unsigned b = SkAlphaBlend(SkGetPackedB32(src), SkGetPackedB32(dst), scale);
|
||||
|
||||
return SkPackARGB32(a, r, g, b);
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract 4-byte interpolation, implemented on top of SkPMColor
|
||||
* utility functions. Third parameter controls blending of the first two:
|
||||
* (src, dst, 0) returns dst
|
||||
* (src, dst, 0xFF) returns src
|
||||
*/
|
||||
static inline SkPMColor SkFourByteInterp(SkPMColor src, SkPMColor dst,
|
||||
U8CPU srcWeight) {
|
||||
unsigned scale = SkAlpha255To256(srcWeight);
|
||||
return SkFourByteInterp256(src, dst, scale);
|
||||
}
|
||||
|
||||
/**
|
||||
* 32b optimized version; currently appears to be 10% faster even on 64b
|
||||
* architectures than an equivalent 64b version and 30% faster than
|
||||
* SkFourByteInterp(). Third parameter controls blending of the first two:
|
||||
* (src, dst, 0) returns dst
|
||||
* (src, dst, 0xFF) returns src
|
||||
* ** Does not match the results of SkFourByteInterp() because we use
|
||||
* a more accurate scale computation!
|
||||
* TODO: migrate Skia function to using an accurate 255->266 alpha
|
||||
* conversion.
|
||||
*/
|
||||
static inline SkPMColor SkFastFourByteInterp(SkPMColor src,
|
||||
SkPMColor dst,
|
||||
U8CPU srcWeight) {
|
||||
SkASSERT(srcWeight < 256);
|
||||
|
||||
// Reorders ARGB to AG-RB in order to reduce the number of operations.
|
||||
const uint32_t mask = 0xFF00FF;
|
||||
uint32_t src_rb = src & mask;
|
||||
uint32_t src_ag = (src >> 8) & mask;
|
||||
uint32_t dst_rb = dst & mask;
|
||||
uint32_t dst_ag = (dst >> 8) & mask;
|
||||
|
||||
// scale = srcWeight + (srcWeight >> 7) is more accurate than
|
||||
// scale = srcWeight + 1, but 7% slower
|
||||
int scale = srcWeight + (srcWeight >> 7);
|
||||
|
||||
uint32_t ret_rb = src_rb * scale + (256 - scale) * dst_rb;
|
||||
uint32_t ret_ag = src_ag * scale + (256 - scale) * dst_ag;
|
||||
|
||||
return (ret_ag & ~mask) | ((ret_rb & ~mask) >> 8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as SkPackARGB32, but this version guarantees to not check that the
|
||||
* values are premultiplied in the debug version.
|
||||
@ -663,5 +725,116 @@ static inline uint32_t SkExpand32_4444(SkPMColor c) {
|
||||
// used for cheap 2x2 dithering when the colors are opaque
|
||||
void sk_dither_memset16(uint16_t dst[], uint16_t value, uint16_t other, int n);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
static inline int SkUpscale31To32(int value) {
|
||||
SkASSERT((unsigned)value <= 31);
|
||||
return value + (value >> 4);
|
||||
}
|
||||
|
||||
static inline int SkBlend32(int src, int dst, int scale) {
|
||||
SkASSERT((unsigned)src <= 0xFF);
|
||||
SkASSERT((unsigned)dst <= 0xFF);
|
||||
SkASSERT((unsigned)scale <= 32);
|
||||
return dst + ((src - dst) * scale >> 5);
|
||||
}
|
||||
|
||||
static inline SkPMColor SkBlendLCD16(int srcA, int srcR, int srcG, int srcB,
|
||||
SkPMColor dst, uint16_t mask) {
|
||||
if (mask == 0) {
|
||||
return dst;
|
||||
}
|
||||
|
||||
/* We want all of these in 5bits, hence the shifts in case one of them
|
||||
* (green) is 6bits.
|
||||
*/
|
||||
int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
|
||||
int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
|
||||
int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
|
||||
|
||||
// Now upscale them to 0..32, so we can use blend32
|
||||
maskR = SkUpscale31To32(maskR);
|
||||
maskG = SkUpscale31To32(maskG);
|
||||
maskB = SkUpscale31To32(maskB);
|
||||
|
||||
// srcA has been upscaled to 256 before passed into this function
|
||||
maskR = maskR * srcA >> 8;
|
||||
maskG = maskG * srcA >> 8;
|
||||
maskB = maskB * srcA >> 8;
|
||||
|
||||
int dstR = SkGetPackedR32(dst);
|
||||
int dstG = SkGetPackedG32(dst);
|
||||
int dstB = SkGetPackedB32(dst);
|
||||
|
||||
// LCD blitting is only supported if the dst is known/required
|
||||
// to be opaque
|
||||
return SkPackARGB32(0xFF,
|
||||
SkBlend32(srcR, dstR, maskR),
|
||||
SkBlend32(srcG, dstG, maskG),
|
||||
SkBlend32(srcB, dstB, maskB));
|
||||
}
|
||||
|
||||
static inline SkPMColor SkBlendLCD16Opaque(int srcR, int srcG, int srcB,
|
||||
SkPMColor dst, uint16_t mask,
|
||||
SkPMColor opaqueDst) {
|
||||
if (mask == 0) {
|
||||
return dst;
|
||||
}
|
||||
|
||||
if (0xFFFF == mask) {
|
||||
return opaqueDst;
|
||||
}
|
||||
|
||||
/* We want all of these in 5bits, hence the shifts in case one of them
|
||||
* (green) is 6bits.
|
||||
*/
|
||||
int maskR = SkGetPackedR16(mask) >> (SK_R16_BITS - 5);
|
||||
int maskG = SkGetPackedG16(mask) >> (SK_G16_BITS - 5);
|
||||
int maskB = SkGetPackedB16(mask) >> (SK_B16_BITS - 5);
|
||||
|
||||
// Now upscale them to 0..32, so we can use blend32
|
||||
maskR = SkUpscale31To32(maskR);
|
||||
maskG = SkUpscale31To32(maskG);
|
||||
maskB = SkUpscale31To32(maskB);
|
||||
|
||||
int dstR = SkGetPackedR32(dst);
|
||||
int dstG = SkGetPackedG32(dst);
|
||||
int dstB = SkGetPackedB32(dst);
|
||||
|
||||
// LCD blitting is only supported if the dst is known/required
|
||||
// to be opaque
|
||||
return SkPackARGB32(0xFF,
|
||||
SkBlend32(srcR, dstR, maskR),
|
||||
SkBlend32(srcG, dstG, maskG),
|
||||
SkBlend32(srcB, dstB, maskB));
|
||||
}
|
||||
|
||||
static inline void SkBlitLCD16Row(SkPMColor dst[], const uint16_t src[],
|
||||
SkColor color, int width, SkPMColor) {
|
||||
int srcA = SkColorGetA(color);
|
||||
int srcR = SkColorGetR(color);
|
||||
int srcG = SkColorGetG(color);
|
||||
int srcB = SkColorGetB(color);
|
||||
|
||||
srcA = SkAlpha255To256(srcA);
|
||||
|
||||
for (int i = 0; i < width; i++) {
|
||||
dst[i] = SkBlendLCD16(srcA, srcR, srcG, srcB, dst[i], src[i]);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void SkBlitLCD16OpaqueRow(SkPMColor dst[], const uint16_t src[],
|
||||
SkColor color, int width,
|
||||
SkPMColor opaqueDst) {
|
||||
int srcR = SkColorGetR(color);
|
||||
int srcG = SkColorGetG(color);
|
||||
int srcB = SkColorGetB(color);
|
||||
|
||||
for (int i = 0; i < width; i++) {
|
||||
dst[i] = SkBlendLCD16Opaque(srcR, srcG, srcB, dst[i], src[i],
|
||||
opaqueDst);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -49,14 +49,13 @@ public:
|
||||
|
||||
virtual GradientType asAGradient(GradientInfo* info) const SK_OVERRIDE;
|
||||
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorShader)
|
||||
|
||||
protected:
|
||||
SkColorShader(SkFlattenableReadBuffer&);
|
||||
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SkColor fColor; // ignored if fInheritColor is true
|
||||
SkPMColor fPMColor; // cached after setContext()
|
||||
|
@ -40,14 +40,13 @@ public:
|
||||
virtual void beginSession();
|
||||
virtual void endSession();
|
||||
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposeShader)
|
||||
|
||||
protected:
|
||||
SkComposeShader(SkFlattenableReadBuffer& );
|
||||
virtual void flatten(SkFlattenableWriteBuffer& );
|
||||
virtual Factory getFactory() { return CreateProc; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkComposeShader, (buffer)); }
|
||||
|
||||
SkShader* fShaderA;
|
||||
SkShader* fShaderB;
|
||||
|
@ -65,8 +65,8 @@ public:
|
||||
ReleaseProc proc, void* context);
|
||||
|
||||
/**
|
||||
* Create a new dataref, reference the data ptr as is, and calling
|
||||
* sk_free to delete it.
|
||||
* Create a new dataref from a pointer allocated by malloc. The Data object
|
||||
* takes ownership of that allocation, and will handling calling sk_free.
|
||||
*/
|
||||
static SkData* NewFromMalloc(const void* data, size_t length);
|
||||
|
||||
|
@ -62,7 +62,7 @@ public:
|
||||
* draw into this device such that all of the pixels will
|
||||
* be opaque.
|
||||
*/
|
||||
SkDevice* createCompatibleDevice(SkBitmap::Config config,
|
||||
SkDevice* createCompatibleDevice(SkBitmap::Config config,
|
||||
int width, int height,
|
||||
bool isOpaque);
|
||||
|
||||
@ -139,7 +139,7 @@ public:
|
||||
protected:
|
||||
enum Usage {
|
||||
kGeneral_Usage,
|
||||
kSaveLayer_Usage // <! internal use only
|
||||
kSaveLayer_Usage, // <! internal use only
|
||||
};
|
||||
|
||||
struct TextFlags {
|
||||
@ -258,7 +258,7 @@ protected:
|
||||
* kARGB_8888_Config as SkPMColor
|
||||
*
|
||||
* If the bitmap has pixels already allocated, the device pixels will be
|
||||
* written there. If not, bitmap->allocPixels() will be called
|
||||
* written there. If not, bitmap->allocPixels() will be called
|
||||
* automatically. If the bitmap is backed by a texture readPixels will
|
||||
* fail.
|
||||
*
|
||||
@ -279,11 +279,14 @@ protected:
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/** Update as needed the pixel value in the bitmap, so that the caller can access
|
||||
the pixels directly. Note: only the pixels field should be altered. The config/width/height/rowbytes
|
||||
must remain unchanged.
|
||||
/** Update as needed the pixel value in the bitmap, so that the caller can
|
||||
access the pixels directly. Note: only the pixels field should be
|
||||
altered. The config/width/height/rowbytes must remain unchanged.
|
||||
@param bitmap The device's bitmap
|
||||
@return Echo the bitmap parameter, or an alternate (shadow) bitmap
|
||||
maintained by the subclass.
|
||||
*/
|
||||
virtual void onAccessBitmap(SkBitmap*);
|
||||
virtual const SkBitmap& onAccessBitmap(SkBitmap*);
|
||||
|
||||
SkPixelRef* getPixelRef() const { return fBitmap.pixelRef(); }
|
||||
// just for subclasses, to assign a custom pixelref
|
||||
@ -291,7 +294,7 @@ protected:
|
||||
fBitmap.setPixelRef(pr, offset);
|
||||
return pr;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Implements readPixels API. The caller will ensure that:
|
||||
* 1. bitmap has pixel config kARGB_8888_Config.
|
||||
@ -310,15 +313,33 @@ protected:
|
||||
virtual void unlockPixels();
|
||||
|
||||
/**
|
||||
* Override and return true for filters that the device handles
|
||||
* intrinsically. Returning false means call the filter.
|
||||
* Default impl returns false.
|
||||
* Returns true if the device allows processing of this imagefilter. If
|
||||
* false is returned, then the filter is ignored. This may happen for
|
||||
* some subclasses that do not support pixel manipulations after drawing
|
||||
* has occurred (e.g. printing). The default implementation returns true.
|
||||
*/
|
||||
virtual bool filterImage(SkImageFilter*, const SkBitmap& src,
|
||||
const SkMatrix& ctm,
|
||||
virtual bool allowImageFilter(SkImageFilter*);
|
||||
|
||||
/**
|
||||
* Override and return true for filters that the device can handle
|
||||
* intrinsically. Doing so means that SkCanvas will pass-through this
|
||||
* filter to drawSprite and drawDevice (and potentially filterImage).
|
||||
* Returning false means the SkCanvas will have apply the filter itself,
|
||||
* and just pass the resulting image to the device.
|
||||
*/
|
||||
virtual bool canHandleImageFilter(SkImageFilter*);
|
||||
|
||||
/**
|
||||
* Related (but not required) to canHandleImageFilter, this method returns
|
||||
* true if the device could apply the filter to the src bitmap and return
|
||||
* the result (and updates offset as needed).
|
||||
* If the device does not recognize or support this filter,
|
||||
* it just returns false and leaves result and offset unchanged.
|
||||
*/
|
||||
virtual bool filterImage(SkImageFilter*, const SkBitmap&, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* offset);
|
||||
|
||||
// This is equal kBGRA_Premul_Config8888 or kRGBA_Premul_Config8888 if
|
||||
// This is equal kBGRA_Premul_Config8888 or kRGBA_Premul_Config8888 if
|
||||
// either is identical to kNative_Premul_Config8888. Otherwise, -1.
|
||||
static const SkCanvas::Config8888 kPMColorAlias;
|
||||
|
||||
@ -330,18 +351,32 @@ private:
|
||||
friend class SkDeviceFilteredPaint;
|
||||
friend class DeviceImageFilterProxy;
|
||||
|
||||
/**
|
||||
* postSave is called by SkCanvas to inform the device that it has
|
||||
* just completed a save operation. This allows derived
|
||||
* classes to initialize their state-dependent caches.
|
||||
*/
|
||||
virtual void postSave() {};
|
||||
|
||||
/**
|
||||
* preRestore is called by SkCanvas right before it executes a restore
|
||||
* operation. As the partner of postSave, it allows
|
||||
* derived classes to clear their state-dependent caches.
|
||||
*/
|
||||
virtual void preRestore() {};
|
||||
|
||||
// just called by SkCanvas when built as a layer
|
||||
void setOrigin(int x, int y) { fOrigin.set(x, y); }
|
||||
// just called by SkCanvas for saveLayer
|
||||
SkDevice* createCompatibleDeviceForSaveLayer(SkBitmap::Config config,
|
||||
SkDevice* createCompatibleDeviceForSaveLayer(SkBitmap::Config config,
|
||||
int width, int height,
|
||||
bool isOpaque);
|
||||
|
||||
/**
|
||||
* Subclasses should override this to implement createCompatibleDevice.
|
||||
*/
|
||||
virtual SkDevice* onCreateCompatibleDevice(SkBitmap::Config config,
|
||||
int width, int height,
|
||||
virtual SkDevice* onCreateCompatibleDevice(SkBitmap::Config config,
|
||||
int width, int height,
|
||||
bool isOpaque,
|
||||
Usage usage);
|
||||
|
||||
|
gfx/skia/include/core/SkDeviceProfile.h (new file, 95 lines)
@@ -0,0 +1,95 @@
|
||||
/*
|
||||
* Copyright 2012 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef SkDeviceProfile_DEFINED
|
||||
#define SkDeviceProfile_DEFINED
|
||||
|
||||
#include "SkRefCnt.h"
|
||||
|
||||
class SkDeviceProfile : public SkRefCnt {
|
||||
public:
|
||||
enum LCDConfig {
|
||||
kNone_LCDConfig, // disables LCD text rendering, uses A8 instead
|
||||
kRGB_Horizontal_LCDConfig,
|
||||
kBGR_Horizontal_LCDConfig,
|
||||
kRGB_Vertical_LCDConfig,
|
||||
kBGR_Vertical_LCDConfig,
|
||||
};
|
||||
|
||||
enum FontHintLevel {
|
||||
kNone_FontHintLevel,
|
||||
kSlight_FontHintLevel,
|
||||
kNormal_FontHintLevel,
|
||||
kFull_FontHintLevel,
|
||||
kAuto_FontHintLevel,
|
||||
};
|
||||
|
||||
/**
|
||||
* gammaExp is typically between 1.0 and 2.2. For no gamma adjustment,
|
||||
* specify 1.0
|
||||
*
|
||||
* contrastScale will be pinned between 0.0 and 1.0. For no contrast
|
||||
* adjustment, specify 0.0
|
||||
*
|
||||
* @param config Describes the LCD layout for this device. If this is set
|
||||
* to kNone, then all requests for LCD text will be
|
||||
* devolved to A8 antialiasing.
|
||||
*
|
||||
* @param level The hinting level to be used, IF the paint specifies
|
||||
* "default". Otherwise the paint's hinting level will be
|
||||
* respected.
|
||||
*/
|
||||
static SkDeviceProfile* Create(float gammaExp,
|
||||
float contrastScale,
|
||||
LCDConfig,
|
||||
FontHintLevel);
|
||||
|
||||
/**
|
||||
* Returns the global default profile, that is used if no global profile is
|
||||
* specified with SetGlobal(), or if NULL is specified to SetGlobal().
|
||||
* The references count is *not* incremented, and the caller should not
|
||||
* call unref().
|
||||
*/
|
||||
static SkDeviceProfile* GetDefault();
|
||||
|
||||
/**
|
||||
* Return the current global profile (or the default if no global had yet
|
||||
* been set) and increment its reference count. The call *must* call unref()
|
||||
* when it is done using it.
|
||||
*/
|
||||
static SkDeviceProfile* RefGlobal();
|
||||
|
||||
/**
|
||||
* Make the specified profile be the global value for all subsequently
|
||||
* instantiated devices. Does not affect any existing devices.
|
||||
* Increments the reference count on the profile.
|
||||
* Specify NULL for the "identity" profile (where there is no gamma or
|
||||
* contrast correction).
|
||||
*/
|
||||
static void SetGlobal(SkDeviceProfile*);
|
||||
|
||||
float getFontGammaExponent() const { return fGammaExponent; }
|
||||
float getFontContrastScale() const { return fContrastScale; }
|
||||
|
||||
/**
|
||||
* Given a luminance byte (0 for black, 0xFF for white), generate a table
|
||||
* that applies the gamma/contrast settings to linear coverage values.
|
||||
*/
|
||||
void generateTableForLuminanceByte(U8CPU lumByte, uint8_t table[256]) const;
|
||||
|
||||
private:
|
||||
SkDeviceProfile(float gammaExp, float contrastScale, LCDConfig,
|
||||
FontHintLevel);
|
||||
|
||||
float fGammaExponent;
|
||||
float fContrastScale;
|
||||
LCDConfig fLCDConfig;
|
||||
FontHintLevel fFontHintLevel;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -77,7 +77,8 @@ public:
|
||||
*/
|
||||
static bool DrawToMask(const SkPath& devPath, const SkIRect* clipBounds,
|
||||
SkMaskFilter* filter, const SkMatrix* filterMatrix,
|
||||
SkMask* mask, SkMask::CreateMode mode);
|
||||
SkMask* mask, SkMask::CreateMode mode,
|
||||
SkPaint::Style style);
|
||||
|
||||
enum RectType {
|
||||
kHair_RectType,
|
||||
@ -128,8 +129,8 @@ class SkGlyphCache;
|
||||
|
||||
class SkTextToPathIter {
|
||||
public:
|
||||
SkTextToPathIter(const char text[], size_t length, const SkPaint&,
|
||||
bool applyStrokeAndPathEffects, bool forceLinearTextOn);
|
||||
SkTextToPathIter(const char text[], size_t length, const SkPaint& paint,
|
||||
bool applyStrokeAndPathEffects);
|
||||
~SkTextToPathIter();
|
||||
|
||||
const SkPaint& getPaint() const { return fPaint; }
|
||||
|
@ -45,6 +45,20 @@ public:
|
||||
*/
|
||||
virtual bool next(SkCanvas*, SkPaint* paint) = 0;
|
||||
|
||||
/**
|
||||
* The fast bounds functions are used to enable the paint to be culled early
|
||||
* in the drawing pipeline. If a subclass can support this feature it must
|
||||
* return true for the canComputeFastBounds() function. If that function
|
||||
* returns false then computeFastBounds behavior is undefined otherwise it
|
||||
* is expected to have the following behavior. Given the parent paint and
|
||||
* the parent's bounding rect the subclass must fill in and return the
|
||||
* storage rect, where the storage rect is with the union of the src rect
|
||||
* and the looper's bounding rect.
|
||||
*/
|
||||
virtual bool canComputeFastBounds(const SkPaint& paint);
|
||||
virtual void computeFastBounds(const SkPaint& paint,
|
||||
const SkRect& src, SkRect* dst);
|
||||
|
||||
protected:
|
||||
SkDrawLooper() {}
|
||||
SkDrawLooper(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
@ -30,11 +30,10 @@ public:
|
||||
virtual void shadeSpan16(int x, int y, uint16_t span[], int count) SK_OVERRIDE;
|
||||
virtual void shadeSpanAlpha(int x, int y, uint8_t alpha[], int count) SK_OVERRIDE;
|
||||
|
||||
protected:
|
||||
SkEmptyShader(SkFlattenableReadBuffer&);
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkEmptyShader)
|
||||
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
protected:
|
||||
SkEmptyShader(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
private:
|
||||
typedef SkShader INHERITED;
|
||||
|
@ -31,8 +31,11 @@
|
||||
*/
|
||||
static inline uint16_t SkEndianSwap16(U16CPU value) {
|
||||
SkASSERT(value == (uint16_t)value);
|
||||
return (uint16_t)((value >> 8) | (value << 8));
|
||||
return static_cast<uint16_t>((value >> 8) | (value << 8));
|
||||
}
|
||||
template<uint16_t N> struct SkTEndianSwap16 {
|
||||
static const uint16_t value = static_cast<uint16_t>((N >> 8) | ((N & 0xFF) << 8));
|
||||
};
|
||||
|
||||
/** Vector version of SkEndianSwap16(), which swaps the
|
||||
low two bytes of each value in the array.
|
||||
@ -55,6 +58,12 @@ static inline uint32_t SkEndianSwap32(uint32_t value) {
|
||||
((value & 0xFF0000) >> 8) |
|
||||
(value >> 24);
|
||||
}
|
||||
template<uint32_t N> struct SkTEndianSwap32 {
|
||||
static const uint32_t value = ((N & 0xFF) << 24) |
|
||||
((N & 0xFF00) << 8) |
|
||||
((N & 0xFF0000) >> 8) |
|
||||
(N >> 24);
|
||||
};
|
||||
|
||||
/** Vector version of SkEndianSwap16(), which swaps the
|
||||
bytes of each value in the array.
|
||||
@ -73,11 +82,21 @@ static inline void SkEndianSwap32s(uint32_t array[], int count) {
|
||||
#define SkEndian_SwapBE32(n) SkEndianSwap32(n)
|
||||
#define SkEndian_SwapLE16(n) (n)
|
||||
#define SkEndian_SwapLE32(n) (n)
|
||||
|
||||
#define SkTEndian_SwapBE16(n) SkTEndianSwap16<n>::value
|
||||
#define SkTEndian_SwapBE32(n) SkTEndianSwap32<n>::value
|
||||
#define SkTEndian_SwapLE16(n) (n)
|
||||
#define SkTEndian_SwapLE32(n) (n)
|
||||
#else // SK_CPU_BENDIAN
|
||||
#define SkEndian_SwapBE16(n) (n)
|
||||
#define SkEndian_SwapBE32(n) (n)
|
||||
#define SkEndian_SwapLE16(n) SkEndianSwap16(n)
|
||||
#define SkEndian_SwapLE32(n) SkEndianSwap32(n)
|
||||
|
||||
#define SkTEndian_SwapBE16(n) (n)
|
||||
#define SkTEndian_SwapBE32(n) (n)
|
||||
#define SkTEndian_SwapLE16(n) SkTEndianSwap16<n>::value
|
||||
#define SkTEndian_SwapLE32(n) SkTEndianSwap32<n>::value
|
||||
#endif
|
||||
|
||||
// When a bytestream is embedded in a 32-bit word, how far we need to
|
||||
@ -94,5 +113,40 @@ static inline void SkEndianSwap32s(uint32_t array[], int count) {
|
||||
#define SkEndian_Byte3Shift 0
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(SK_UINT8_BITFIELD_LENDIAN) && defined(SK_UINT8_BITFIELD_BENDIAN)
|
||||
#error "can't have both bitfield LENDIAN and BENDIAN defined"
|
||||
#endif
|
||||
|
||||
#if !defined(SK_UINT8_BITFIELD_LENDIAN) && !defined(SK_UINT8_BITFIELD_BENDIAN)
|
||||
#ifdef SK_CPU_LENDIAN
|
||||
#define SK_UINT8_BITFIELD_LENDIAN
|
||||
#else
|
||||
#define SK_UINT8_BITFIELD_BENDIAN
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef SK_UINT8_BITFIELD_LENDIAN
|
||||
#define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
|
||||
SK_OT_BYTE f0 : 1; \
|
||||
SK_OT_BYTE f1 : 1; \
|
||||
SK_OT_BYTE f2 : 1; \
|
||||
SK_OT_BYTE f3 : 1; \
|
||||
SK_OT_BYTE f4 : 1; \
|
||||
SK_OT_BYTE f5 : 1; \
|
||||
SK_OT_BYTE f6 : 1; \
|
||||
SK_OT_BYTE f7 : 1;
|
||||
#else
|
||||
#define SK_UINT8_BITFIELD(f0, f1, f2, f3, f4, f5, f6, f7) \
|
||||
SK_OT_BYTE f7 : 1; \
|
||||
SK_OT_BYTE f6 : 1; \
|
||||
SK_OT_BYTE f5 : 1; \
|
||||
SK_OT_BYTE f4 : 1; \
|
||||
SK_OT_BYTE f3 : 1; \
|
||||
SK_OT_BYTE f2 : 1; \
|
||||
SK_OT_BYTE f1 : 1; \
|
||||
SK_OT_BYTE f0 : 1;
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -264,4 +264,20 @@ inline bool SkFixedNearlyZero(SkFixed x, SkFixed tolerance = SK_FixedNearlyZero)
|
||||
#define SkFixedMulAdd(x, y, a) (SkFixedMul(x, y) + (a))
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
typedef int64_t SkFixed48;
|
||||
|
||||
#define SkIntToFixed48(x) ((SkFixed48)(x) << 48)
|
||||
#define SkFixed48ToInt(x) ((int)((x) >> 48))
|
||||
#define SkFixedToFixed48(x) ((SkFixed48)(x) << 32)
|
||||
#define SkFixed48ToFixed(x) ((SkFixed)((x) >> 32))
|
||||
#define SkFloatToFixed48(x) ((SkFixed48)((x) * (65536.0f * 65536.0f * 65536.0f)))
|
||||
|
||||
#ifdef SK_SCALAR_IS_FLOAT
|
||||
#define SkScalarToFixed48(x) SkFloatToFixed48(x)
|
||||
#else
|
||||
#define SkScalarToFixed48(x) SkFixedToFixed48(x)
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
@ -12,6 +12,8 @@
|
||||
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkBitmap.h"
|
||||
#include "SkPath.h"
|
||||
#include "SkPoint.h"
|
||||
#include "SkReader32.h"
|
||||
#include "SkTDArray.h"
|
||||
#include "SkWriter32.h"
|
||||
@ -22,38 +24,42 @@ class SkString;
|
||||
|
||||
#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
|
||||
|
||||
#define SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR(flattenable) \
|
||||
static SkFlattenable::Registrar g##flattenable##Reg(#flattenable, \
|
||||
flattenable::CreateProc);
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(flattenable)
|
||||
flattenable::CreateProc);
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(flattenable) \
|
||||
static SkFlattenable::Registrar g##flattenable##Reg(#flattenable, \
|
||||
flattenable::CreateProc);
|
||||
flattenable::CreateProc);
|
||||
|
||||
#define SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(flattenable)
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END
|
||||
|
||||
#else
|
||||
|
||||
#define SK_DECLARE_FLATTENABLE_REGISTRAR() static void Init();
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR(flattenable) \
|
||||
void flattenable::Init() { \
|
||||
SkFlattenable::Registrar(#flattenable, CreateProc); \
|
||||
}
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(flattenable) \
|
||||
void flattenable::Init() {
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR(flattenable)
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_ENTRY(flattenable) \
|
||||
SkFlattenable::Registrar(#flattenable, flattenable::CreateProc);
|
||||
|
||||
|
||||
#define SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP() static void InitializeFlattenables();
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_START(flattenable) \
|
||||
void flattenable::InitializeFlattenables() {
|
||||
|
||||
#define SK_DEFINE_FLATTENABLE_REGISTRAR_GROUP_END \
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define SK_DECLARE_UNFLATTENABLE_OBJECT() \
|
||||
virtual Factory getFactory() SK_OVERRIDE { return NULL; }; \
|
||||
|
||||
#define SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(flattenable) \
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }; \
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) { \
|
||||
return SkNEW_ARGS(flattenable, (buffer)); \
|
||||
}
|
||||
|
||||
/** \class SkFlattenable
|
||||
|
||||
SkFlattenable is the base class for objects that need to be flattened
|
||||
@ -71,30 +77,25 @@ public:
|
||||
override of flatten().
|
||||
*/
|
||||
virtual Factory getFactory() = 0;
|
||||
/** Override this to write data specific to your subclass into the buffer,
|
||||
being sure to call your super-class' version first. This data will later
|
||||
be passed to your Factory function, returned by getFactory().
|
||||
*/
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
/** Set the string to describe the sublass and return true. If this is not
|
||||
overridden, ignore the string param and return false.
|
||||
*/
|
||||
virtual bool toDumpString(SkString*) const;
|
||||
|
||||
static Factory NameToFactory(const char name[]);
|
||||
static const char* FactoryToName(Factory);
|
||||
static void Register(const char name[], Factory);
|
||||
|
||||
|
||||
class Registrar {
|
||||
public:
|
||||
Registrar(const char name[], Factory factory) {
|
||||
SkFlattenable::Register(name, factory);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
protected:
|
||||
SkFlattenable(SkFlattenableReadBuffer&) {}
|
||||
/** Override this to write data specific to your subclass into the buffer,
|
||||
being sure to call your super-class' version first. This data will later
|
||||
be passed to your Factory function, returned by getFactory().
|
||||
*/
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const;
|
||||
|
||||
private:
|
||||
#if !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
|
||||
@ -102,6 +103,7 @@ private:
|
||||
#endif
|
||||
|
||||
friend class SkGraphics;
|
||||
friend class SkFlattenableWriteBuffer;
|
||||
};
|
||||
|
||||
// helpers for matrix and region
|
||||
@ -119,12 +121,38 @@ extern void SkWriteRegion(SkWriter32*, const SkRegion&);
|
||||
|
||||
class SkTypeface;
|
||||
|
||||
class SkFlattenableReadBuffer : public SkReader32 {
|
||||
class SkFlattenableReadBuffer {
|
||||
public:
|
||||
SkFlattenableReadBuffer();
|
||||
explicit SkFlattenableReadBuffer(const void* data);
|
||||
SkFlattenableReadBuffer(const void* data, size_t size);
|
||||
|
||||
virtual ~SkFlattenableReadBuffer() {}
|
||||
|
||||
|
||||
virtual uint8_t readU8() = 0;
|
||||
virtual uint16_t readU16() = 0;
|
||||
virtual uint32_t readU32() = 0;
|
||||
virtual void read(void* dst, size_t size) = 0;
|
||||
virtual bool readBool() = 0;
|
||||
virtual int32_t readInt() = 0;
|
||||
virtual SkScalar readScalar() = 0;
|
||||
virtual const void* skip(size_t size) = 0;
|
||||
|
||||
virtual int32_t readS32() { return readInt(); }
|
||||
template <typename T> const T& skipT() {
|
||||
SkASSERT(SkAlign4(sizeof(T)) == sizeof(T));
|
||||
return *(const T*)this->skip(sizeof(T));
|
||||
}
|
||||
|
||||
virtual void readMatrix(SkMatrix*) = 0;
|
||||
virtual void readPath(SkPath*) = 0;
|
||||
virtual void readPoint(SkPoint*) = 0;
|
||||
|
||||
// helper function for classes with const SkPoint members
|
||||
SkPoint readPoint() {
|
||||
SkPoint point;
|
||||
this->readPoint(&point);
|
||||
return point;
|
||||
}
|
||||
|
||||
void setRefCntArray(SkRefCnt* array[], int count) {
|
||||
fRCArray = array;
|
||||
fRCCount = count;
|
||||
@ -156,12 +184,12 @@ public:
|
||||
fFactoryCount = 0;
|
||||
}
|
||||
|
||||
SkTypeface* readTypeface();
|
||||
SkRefCnt* readRefCnt();
|
||||
void* readFunctionPtr();
|
||||
SkFlattenable* readFlattenable();
|
||||
virtual SkTypeface* readTypeface() = 0;
|
||||
virtual SkRefCnt* readRefCnt() = 0;
|
||||
virtual void* readFunctionPtr() = 0;
|
||||
virtual SkFlattenable* readFlattenable() = 0;
|
||||
|
||||
private:
|
||||
protected:
|
||||
SkRefCnt** fRCArray;
|
||||
int fRCCount;
|
||||
|
||||
@ -171,8 +199,6 @@ private:
|
||||
SkTDArray<SkFlattenable::Factory>* fFactoryTDArray;
|
||||
SkFlattenable::Factory* fFactoryArray;
|
||||
int fFactoryCount;
|
||||
|
||||
typedef SkReader32 INHERITED;
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
@ -187,7 +213,7 @@ private:
|
||||
class SkRefCntSet : public SkTPtrSet<SkRefCnt*> {
|
||||
public:
|
||||
virtual ~SkRefCntSet();
|
||||
|
||||
|
||||
protected:
|
||||
// overrides
|
||||
virtual void incPtr(void*);
|
||||
@ -196,22 +222,47 @@ protected:
|
||||
|
||||
class SkFactorySet : public SkTPtrSet<SkFlattenable::Factory> {};
|
||||
|
||||
class SkFlattenableWriteBuffer : public SkWriter32 {
|
||||
class SkFlattenableWriteBuffer {
|
||||
public:
|
||||
SkFlattenableWriteBuffer(size_t minSize);
|
||||
SkFlattenableWriteBuffer();
|
||||
virtual ~SkFlattenableWriteBuffer();
|
||||
|
||||
// deprecated naming convention that will be removed after callers are updated
|
||||
virtual bool writeBool(bool value) = 0;
|
||||
virtual void writeInt(int32_t value) = 0;
|
||||
virtual void write8(int32_t value) = 0;
|
||||
virtual void write16(int32_t value) = 0;
|
||||
virtual void write32(int32_t value) = 0;
|
||||
virtual void writeScalar(SkScalar value) = 0;
|
||||
virtual void writeMul4(const void* values, size_t size) = 0;
|
||||
|
||||
virtual void writePad(const void* src, size_t size) = 0;
|
||||
virtual void writeString(const char* str, size_t len = (size_t)-1) = 0;
|
||||
virtual uint32_t* reserve(size_t size) = 0;
|
||||
virtual void flatten(void* dst) = 0;
|
||||
virtual uint32_t size() = 0;
|
||||
virtual void write(const void* values, size_t size) = 0;
|
||||
virtual void writeRect(const SkRect& rect) = 0;
|
||||
virtual size_t readFromStream(SkStream*, size_t length) = 0;
|
||||
|
||||
virtual void writeMatrix(const SkMatrix& matrix) = 0;
|
||||
virtual void writePath(const SkPath& path) = 0;
|
||||
virtual void writePoint(const SkPoint& point) = 0;
|
||||
|
||||
virtual bool writeToStream(SkWStream*) = 0;
|
||||
|
||||
virtual void writeFunctionPtr(void*)= 0;
|
||||
virtual void writeFlattenable(SkFlattenable* flattenable)= 0;
|
||||
|
||||
void writeTypeface(SkTypeface*);
|
||||
void writeRefCnt(SkRefCnt*);
|
||||
void writeFunctionPtr(void*);
|
||||
void writeFlattenable(SkFlattenable* flattenable);
|
||||
|
||||
void writeRefCnt(SkRefCnt* obj);
|
||||
|
||||
SkRefCntSet* getTypefaceRecorder() const { return fTFSet; }
|
||||
SkRefCntSet* setTypefaceRecorder(SkRefCntSet*);
|
||||
|
||||
|
||||
SkRefCntSet* getRefCntRecorder() const { return fRCSet; }
|
||||
SkRefCntSet* setRefCntRecorder(SkRefCntSet*);
|
||||
|
||||
|
||||
SkFactorySet* getFactoryRecorder() const { return fFactorySet; }
|
||||
SkFactorySet* setFactoryRecorder(SkFactorySet*);
|
||||
|
||||
@ -221,11 +272,11 @@ public:
|
||||
* Instructs the writer to inline Factory names as there are seen the
|
||||
* first time (after that we store an index). The pipe code uses this.
|
||||
*/
|
||||
kInlineFactoryNames_Flag = 0x02
|
||||
kInlineFactoryNames_Flag = 0x02,
|
||||
};
|
||||
Flags getFlags() const { return (Flags)fFlags; }
|
||||
void setFlags(Flags flags) { fFlags = flags; }
|
||||
|
||||
|
||||
bool isCrossProcess() const {
|
||||
return SkToBool(fFlags & kCrossProcess_Flag);
|
||||
}
|
||||
@ -236,16 +287,21 @@ public:
|
||||
bool persistBitmapPixels() const {
|
||||
return (fFlags & kCrossProcess_Flag) != 0;
|
||||
}
|
||||
|
||||
|
||||
bool persistTypeface() const { return (fFlags & kCrossProcess_Flag) != 0; }
|
||||
|
||||
private:
|
||||
protected:
|
||||
|
||||
// A helper function so that each subclass does not have to be a friend of
|
||||
// SkFlattenable.
|
||||
void flattenObject(SkFlattenable* obj, SkFlattenableWriteBuffer& buffer) {
|
||||
obj->flatten(buffer);
|
||||
}
|
||||
|
||||
uint32_t fFlags;
|
||||
SkRefCntSet* fTFSet;
|
||||
SkRefCntSet* fRCSet;
|
||||
SkFactorySet* fFactorySet;
|
||||
|
||||
typedef SkWriter32 INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -17,8 +17,6 @@ class SkDescriptor;
|
||||
class SkStream;
|
||||
class SkWStream;
|
||||
|
||||
typedef uint32_t SkFontTableTag;
|
||||
|
||||
/** \class SkFontHost
|
||||
|
||||
This class is ported to each environment. It is responsible for bridging
|
||||
@ -52,15 +50,13 @@ class SK_API SkFontHost {
|
||||
public:
|
||||
/** Return a new, closest matching typeface given either an existing family
|
||||
(specified by a typeface in that family) or by a familyName and a
|
||||
requested style, or by a set of Unicode codepoitns to cover in a given
|
||||
style.
|
||||
requested style.
|
||||
1) If familyFace is null, use familyName.
|
||||
2) If familyName is null, use data (UTF-16 to cover).
|
||||
3) If all are null, return the default font that best matches style
|
||||
*/
|
||||
static SkTypeface* CreateTypeface(const SkTypeface* familyFace,
|
||||
const char familyName[],
|
||||
const void* data, size_t bytelength,
|
||||
SkTypeface::Style style);
|
||||
|
||||
/** Return a new typeface given the data buffer. If the data does not
|
||||
@ -84,12 +80,6 @@ public:
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/** Returns true if the specified unique ID matches an existing font.
|
||||
Returning false is similar to calling OpenStream with an invalid ID,
|
||||
which will return NULL in that case.
|
||||
*/
|
||||
static bool ValidFontID(SkFontID uniqueID);
|
||||
|
||||
/** Return a new stream to read the font data, or null if the uniqueID does
|
||||
not match an existing typeface. .The caller must call stream->unref()
|
||||
when it is finished reading the data.
|
||||
@ -250,7 +240,7 @@ public:
|
||||
*/
|
||||
enum LCDOrientation {
|
||||
kHorizontal_LCDOrientation = 0, //!< this is the default
|
||||
kVertical_LCDOrientation = 1
|
||||
kVertical_LCDOrientation = 1,
|
||||
};
|
||||
|
||||
static void SetSubpixelOrientation(LCDOrientation orientation);
|
||||
@ -269,7 +259,7 @@ public:
|
||||
enum LCDOrder {
|
||||
kRGB_LCDOrder = 0, //!< this is the default
|
||||
kBGR_LCDOrder = 1,
|
||||
kNONE_LCDOrder = 2
|
||||
kNONE_LCDOrder = 2,
|
||||
};
|
||||
|
||||
static void SetSubpixelOrder(LCDOrder order);
|
||||
|
@ -12,7 +12,7 @@
|
||||
|
||||
#include "SkTypes.h"
|
||||
|
||||
class SkGraphics {
|
||||
class SK_API SkGraphics {
|
||||
public:
|
||||
/**
|
||||
* Call this at process initialization time if your environment does not
|
||||
@ -65,6 +65,26 @@ public:
|
||||
*/
|
||||
static void SetFlags(const char* flags);
|
||||
|
||||
/**
|
||||
* Return the max number of bytes that should be used by the thread-local
|
||||
* font cache.
|
||||
* If the cache needs to allocate more, it will purge previous entries.
|
||||
* This max can be changed by calling SetFontCacheLimit().
|
||||
*
|
||||
* If this thread has never called SetTLSFontCacheLimit, or has called it
|
||||
* with 0, then this thread is using the shared font cache. In that case,
|
||||
* this function will always return 0, and the caller may want to call
|
||||
* GetFontCacheLimit.
|
||||
*/
|
||||
static size_t GetTLSFontCacheLimit();
|
||||
|
||||
/**
|
||||
* Specify the max number of bytes that should be used by the thread-local
|
||||
* font cache. If this value is 0, then this thread will use the shared
|
||||
* global font cache.
|
||||
*/
|
||||
static void SetTLSFontCacheLimit(size_t bytes);
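A minimal usage sketch of the per-thread limit described above (the 1 MB value is purely illustrative):

    // Give this thread a private 1 MB font cache instead of the shared one.
    SkGraphics::SetTLSFontCacheLimit(1 * 1024 * 1024);
    // ... rasterize text on this thread ...
    // Switch back to the shared global cache.
    SkGraphics::SetTLSFontCacheLimit(0);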
|
||||
|
||||
private:
|
||||
/** This is automatically called by SkGraphics::Init(), and must be
|
||||
implemented by the host OS. This allows the host OS to register a callback
|
||||
|
@ -40,8 +40,11 @@ class SK_API SkImageFilter : public SkFlattenable {
|
||||
public:
|
||||
class Proxy {
|
||||
public:
|
||||
virtual ~Proxy() {};
|
||||
|
||||
virtual SkDevice* createDevice(int width, int height) = 0;
|
||||
|
||||
// returns true if the proxy can handle this filter natively
|
||||
virtual bool canHandleImageFilter(SkImageFilter*) = 0;
|
||||
// returns true if the proxy handled the filter itself. if this returns
|
||||
// false then the filter's code will be called.
|
||||
virtual bool filterImage(SkImageFilter*, const SkBitmap& src,
|
||||
@ -79,6 +82,22 @@ public:
|
||||
*/
|
||||
virtual bool asABlur(SkSize* sigma) const;
|
||||
|
||||
/**
|
||||
* Experimental.
|
||||
*
|
||||
* If the filter can be expressed as an erode, return true and
|
||||
* set the radius in X and Y.
|
||||
*/
|
||||
virtual bool asAnErode(SkISize* radius) const;
|
||||
|
||||
/**
|
||||
* Experimental.
|
||||
*
|
||||
* If the filter can be expressed as a dilation, return true and
|
||||
* set the radius in X and Y.
|
||||
*/
|
||||
virtual bool asADilate(SkISize* radius) const;
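A hedged sketch of how a backend might use these experimental queries (filter is an assumed SkImageFilter*; the fast paths are illustrative only):

    SkISize radius;
    if (filter->asAnErode(&radius)) {
        // apply a radius.fWidth x radius.fHeight erode directly
    } else if (filter->asADilate(&radius)) {
        // apply the equivalent dilate directly
    } else {
        // fall back to the generic filterImage() path
    }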
|
||||
|
||||
protected:
|
||||
SkImageFilter() {}
|
||||
explicit SkImageFilter(SkFlattenableReadBuffer& rb) : INHERITED(rb) {}
|
||||
|
@ -28,22 +28,15 @@ public:
|
||||
size_t getSize() const { return fSize; }
|
||||
void* getAddr() const { return fStorage; }
|
||||
|
||||
// overrides from SkPixelRef
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const;
|
||||
virtual Factory getFactory() const {
|
||||
return Create;
|
||||
}
|
||||
static SkPixelRef* Create(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkMallocPixelRef, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMallocPixelRef)
|
||||
|
||||
SK_DECLARE_PIXEL_REF_REGISTRAR()
|
||||
protected:
|
||||
// overrides from SkPixelRef
|
||||
virtual void* onLockPixels(SkColorTable**);
|
||||
virtual void onUnlockPixels();
|
||||
|
||||
SkMallocPixelRef(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
void* fStorage;
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include "SkFlattenable.h"
|
||||
#include "SkMask.h"
|
||||
#include "SkPaint.h"
|
||||
|
||||
class SkBlitter;
|
||||
class SkBounder;
|
||||
@ -55,14 +56,12 @@ public:
|
||||
virtual bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
|
||||
SkIPoint* margin);
|
||||
|
||||
virtual void flatten(SkFlattenableWriteBuffer& ) {}
|
||||
|
||||
enum BlurType {
|
||||
kNone_BlurType, //!< this maskfilter is not a blur
|
||||
kNormal_BlurType, //!< fuzzy inside and outside
|
||||
kSolid_BlurType, //!< solid inside, fuzzy outside
|
||||
kOuter_BlurType, //!< nothing inside, fuzzy outside
|
||||
kInner_BlurType //!< fuzzy inside, nothing outside
|
||||
kInner_BlurType, //!< fuzzy inside, nothing outside
|
||||
};
|
||||
|
||||
struct BlurInfo {
|
||||
@ -79,9 +78,22 @@ public:
|
||||
*/
|
||||
virtual BlurType asABlur(BlurInfo*) const;
|
||||
|
||||
/**
|
||||
* The fast bounds function is used to enable the paint to be culled early
|
||||
* in the drawing pipeline. This function accepts the current bounds of the
|
||||
     * paint as its src param and the filter adjusts those bounds using its
|
||||
* current mask and returns the result using the dest param. Callers are
|
||||
* allowed to provide the same struct for both src and dest so each
|
||||
     * implementation must accommodate that behavior.
|
||||
*
|
||||
* The default impl calls filterMask with the src mask having no image,
|
||||
* but subclasses may override this if they can compute the rect faster.
|
||||
*/
|
||||
virtual void computeFastBounds(const SkRect& src, SkRect* dest);
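A small sketch of the aliasing guarantee described above (maskFilter is an assumed SkMaskFilter*):

    SkRect bounds = SkRect::MakeWH(SkIntToScalar(100), SkIntToScalar(100));
    // src and dest may be the same struct, so growing the rect in place is legal:
    maskFilter->computeFastBounds(bounds, &bounds);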
|
||||
|
||||
protected:
|
||||
// empty for now, but lets get our subclass to remember to init us for the future
|
||||
SkMaskFilter(SkFlattenableReadBuffer&) {}
|
||||
SkMaskFilter(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
private:
|
||||
friend class SkDraw;
|
||||
@ -92,7 +104,10 @@ private:
|
||||
This method is not exported to java.
|
||||
*/
|
||||
bool filterPath(const SkPath& devPath, const SkMatrix& devMatrix,
|
||||
const SkRasterClip&, SkBounder*, SkBlitter* blitter);
|
||||
const SkRasterClip&, SkBounder*, SkBlitter* blitter,
|
||||
SkPaint::Style style);
|
||||
|
||||
typedef SkFlattenable INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -153,7 +153,10 @@ static inline bool SkIsPow2(int value) {
|
||||
With this requirement, we can generate faster instructions on some
|
||||
architectures.
|
||||
*/
|
||||
#ifdef SK_ARM_HAS_EDSP
|
||||
#if defined(__arm__) \
|
||||
&& !defined(__thumb__) \
|
||||
&& !defined(__ARM_ARCH_4T__) \
|
||||
&& !defined(__ARM_ARCH_5T__)
|
||||
static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
|
||||
SkASSERT((int16_t)x == x);
|
||||
SkASSERT((int16_t)y == y);
|
||||
|
@ -336,7 +336,7 @@ public:
|
||||
set inverse to be the inverse of this matrix. If this matrix cannot be
|
||||
inverted, ignore inverse and return false
|
||||
*/
|
||||
bool invert(SkMatrix* inverse) const;
|
||||
bool SK_WARN_UNUSED_RESULT invert(SkMatrix* inverse) const;
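With the SK_WARN_UNUSED_RESULT annotation, silently ignoring the return value now draws a compiler warning on GCC/Clang. A hedged sketch of the intended call pattern (matrix is an assumed SkMatrix):

    SkMatrix inverse;
    if (matrix.invert(&inverse)) {
        // safe to use inverse here
    } else {
        // matrix was singular; inverse is unspecified
    }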
|
||||
|
||||
/** Fills the passed array with affine identity values
|
||||
in column major order.
|
||||
@ -483,20 +483,28 @@ public:
|
||||
*/
|
||||
bool fixedStepInX(SkScalar y, SkFixed* stepX, SkFixed* stepY) const;
|
||||
|
||||
#ifdef SK_SCALAR_IS_FIXED
|
||||
friend bool operator==(const SkMatrix& a, const SkMatrix& b) {
|
||||
return memcmp(a.fMat, b.fMat, sizeof(a.fMat)) == 0;
|
||||
/** Efficient comparison of two matrices. It distinguishes between zero and
|
||||
* negative zero. It will return false when the sign of zero values is the
|
||||
* only difference between the two matrices. It considers NaN values to be
|
||||
* equal to themselves. So a matrix full of NaNs is "cheap equal" to
|
||||
* another matrix full of NaNs iff the NaN values are bitwise identical
|
||||
     * while according to the strict == test a matrix with a NaN value
|
||||
* is equal to nothing, including itself.
|
||||
*/
|
||||
bool cheapEqualTo(const SkMatrix& m) const {
|
||||
return 0 == memcmp(fMat, m.fMat, sizeof(fMat));
|
||||
}
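A hedged illustration of the bitwise comparison above, assuming float scalars (with fixed-point scalars, operator== is itself defined via cheapEqualTo() as shown below):

    SkMatrix a, b;
    a.setTranslate(0, 0);
    b.setTranslate(-0.0f, 0);          // negative zero in the translate slot
    bool strictEq = (a == b);          // true: -0.0f == 0.0f under IEEE rules
    bool cheapEq  = a.cheapEqualTo(b); // false: the bit patterns differ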
|
||||
|
||||
friend bool operator!=(const SkMatrix& a, const SkMatrix& b) {
|
||||
return memcmp(a.fMat, b.fMat, sizeof(a.fMat)) != 0;
|
||||
#ifdef SK_SCALAR_IS_FIXED
|
||||
friend bool operator==(const SkMatrix& a, const SkMatrix& b) {
|
||||
return a.cheapEqualTo(b);
|
||||
}
|
||||
#else
|
||||
friend bool operator==(const SkMatrix& a, const SkMatrix& b);
|
||||
friend bool operator==(const SkMatrix& a, const SkMatrix& b);
|
||||
#endif
|
||||
friend bool operator!=(const SkMatrix& a, const SkMatrix& b) {
|
||||
return !(a == b);
|
||||
}
|
||||
#endif
|
||||
|
||||
enum {
|
||||
// flatten/unflatten will never return a value larger than this
|
||||
@ -541,7 +549,7 @@ private:
|
||||
enum {
|
||||
/** Set if the matrix will map a rectangle to another rectangle. This
|
||||
can be true if the matrix is scale-only, or rotates a multiple of
|
||||
90 degrees. This bit is not set if the matrix is identity.
|
||||
90 degrees.
|
||||
|
||||
This bit will be set on identity matrices
|
||||
*/
|
||||
@ -566,8 +574,8 @@ private:
|
||||
kRectStaysRect_Mask
|
||||
};
|
||||
|
||||
SkScalar fMat[9];
|
||||
mutable uint8_t fTypeMask;
|
||||
SkScalar fMat[9];
|
||||
mutable uint32_t fTypeMask;
|
||||
|
||||
uint8_t computeTypeMask() const;
|
||||
uint8_t computePerspectiveTypeMask() const;
|
||||
|
61
gfx/skia/include/core/SkOrderedReadBuffer.h
Normal file
@ -0,0 +1,61 @@
|
||||
|
||||
/*
|
||||
* Copyright 2011 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef SkOrderedReadBuffer_DEFINED
|
||||
#define SkOrderedReadBuffer_DEFINED
|
||||
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkBitmap.h"
|
||||
#include "SkFlattenable.h"
|
||||
#include "SkWriter32.h"
|
||||
#include "SkPath.h"
|
||||
|
||||
class SkOrderedReadBuffer : public SkFlattenableReadBuffer {
|
||||
public:
|
||||
SkOrderedReadBuffer() : INHERITED() {}
|
||||
SkOrderedReadBuffer(const void* data, size_t size);
|
||||
|
||||
void setMemory(const void* data, size_t size) { fReader.setMemory(data, size); }
|
||||
uint32_t size() { return fReader.size(); }
|
||||
const void* base() { return fReader.base(); }
|
||||
uint32_t offset() { return fReader.offset(); }
|
||||
bool eof() { return fReader.eof(); }
|
||||
void rewind() { fReader.rewind(); }
|
||||
void setOffset(size_t offset) { fReader.setOffset(offset); }
|
||||
|
||||
SkReader32* getReader32() { return &fReader; }
|
||||
|
||||
virtual uint8_t readU8() { return fReader.readU8(); }
|
||||
virtual uint16_t readU16() { return fReader.readU16(); }
|
||||
virtual uint32_t readU32() { return fReader.readU32(); }
|
||||
virtual void read(void* dst, size_t size) { return fReader.read(dst, size); }
|
||||
virtual bool readBool() { return fReader.readBool(); }
|
||||
virtual int32_t readInt() { return fReader.readInt(); }
|
||||
virtual SkScalar readScalar() { return fReader.readScalar(); }
|
||||
virtual const void* skip(size_t size) { return fReader.skip(size); }
|
||||
|
||||
virtual void readMatrix(SkMatrix* m) { fReader.readMatrix(m); }
|
||||
virtual void readPath(SkPath* p) { p->unflatten(fReader); }
|
||||
virtual void readPoint(SkPoint* p) {
|
||||
p->fX = fReader.readScalar();
|
||||
p->fY = fReader.readScalar();
|
||||
}
|
||||
|
||||
virtual SkTypeface* readTypeface();
|
||||
virtual SkRefCnt* readRefCnt();
|
||||
virtual void* readFunctionPtr();
|
||||
virtual SkFlattenable* readFlattenable();
|
||||
|
||||
private:
|
||||
SkReader32 fReader;
|
||||
|
||||
typedef SkFlattenableReadBuffer INHERITED;
|
||||
};
|
||||
|
||||
#endif // SkOrderedReadBuffer_DEFINED
|
||||
|
61
gfx/skia/include/core/SkOrderedWriteBuffer.h
Normal file
@ -0,0 +1,61 @@
|
||||
|
||||
/*
|
||||
* Copyright 2011 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef SkOrderedWriteBuffer_DEFINED
|
||||
#define SkOrderedWriteBuffer_DEFINED
|
||||
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkBitmap.h"
|
||||
#include "SkFlattenable.h"
|
||||
#include "SkWriter32.h"
|
||||
#include "SkPath.h"
|
||||
|
||||
class SkOrderedWriteBuffer : public SkFlattenableWriteBuffer {
|
||||
public:
|
||||
SkOrderedWriteBuffer(size_t minSize);
|
||||
SkOrderedWriteBuffer(size_t minSize, void* initialStorage,
|
||||
size_t storageSize);
|
||||
virtual ~SkOrderedWriteBuffer() {}
|
||||
|
||||
// deprecated naming convention that will be removed after callers are updated
|
||||
virtual bool writeBool(bool value) { return fWriter.writeBool(value); }
|
||||
virtual void writeInt(int32_t value) { fWriter.writeInt(value); }
|
||||
virtual void write8(int32_t value) { fWriter.write8(value); }
|
||||
virtual void write16(int32_t value) { fWriter.write16(value); }
|
||||
virtual void write32(int32_t value) { fWriter.write32(value); }
|
||||
virtual void writeScalar(SkScalar value) { fWriter.writeScalar(value); }
|
||||
virtual void writeMul4(const void* values, size_t size) { fWriter.writeMul4(values, size); }
|
||||
|
||||
virtual void writePad(const void* src, size_t size) { fWriter.writePad(src, size); }
|
||||
virtual void writeString(const char* str, size_t len = (size_t)-1) { fWriter.writeString(str, len); }
|
||||
virtual bool writeToStream(SkWStream* stream) { return fWriter.writeToStream(stream); }
|
||||
virtual void write(const void* values, size_t size) { fWriter.write(values, size); }
|
||||
virtual void writeRect(const SkRect& rect) { fWriter.writeRect(rect); }
|
||||
virtual size_t readFromStream(SkStream* s, size_t length) { return fWriter.readFromStream(s, length); }
|
||||
|
||||
virtual void writeMatrix(const SkMatrix& matrix) { fWriter.writeMatrix(matrix); }
|
||||
virtual void writePath(const SkPath& path) { path.flatten(fWriter); };
|
||||
virtual void writePoint(const SkPoint& point) {
|
||||
fWriter.writeScalar(point.fX);
|
||||
fWriter.writeScalar(point.fY);
|
||||
}
|
||||
|
||||
virtual uint32_t* reserve(size_t size) { return fWriter.reserve(size); }
|
||||
virtual void flatten(void* dst) { fWriter.flatten(dst); }
|
||||
virtual uint32_t size() { return fWriter.size(); }
|
||||
|
||||
virtual void writeFunctionPtr(void*);
|
||||
virtual void writeFlattenable(SkFlattenable* flattenable);
|
||||
|
||||
private:
|
||||
SkWriter32 fWriter;
|
||||
typedef SkFlattenableWriteBuffer INHERITED;
|
||||
};
|
||||
|
||||
#endif // SkOrderedWriteBuffer_DEFINED
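A hedged round-trip sketch using the two buffers above (SkAutoMalloc is assumed available from SkTemplates.h):

    SkOrderedWriteBuffer writer(64);
    writer.writeScalar(SkIntToScalar(7));
    writer.writeInt(42);

    SkAutoMalloc storage(writer.size());
    writer.flatten(storage.get());

    SkOrderedReadBuffer reader(storage.get(), writer.size());
    SkScalar s = reader.readScalar();   // 7
    int32_t  n = reader.readInt();      // 42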
|
||||
|
@ -1,4 +1,5 @@
|
||||
|
||||
|
||||
/*
|
||||
* Copyright 2006 The Android Open Source Project
|
||||
*
|
||||
@ -11,6 +12,7 @@
|
||||
#define SkPaint_DEFINED
|
||||
|
||||
#include "SkColor.h"
|
||||
#include "SkDrawLooper.h"
|
||||
#include "SkXfermode.h"
|
||||
|
||||
class SkAutoGlyphCache;
|
||||
@ -28,7 +30,6 @@ class SkPath;
|
||||
class SkPathEffect;
|
||||
class SkRasterizer;
|
||||
class SkShader;
|
||||
class SkDrawLooper;
|
||||
class SkTypeface;
|
||||
|
||||
typedef const SkGlyph& (*SkDrawCacheProc)(SkGlyphCache*, const char**,
|
||||
@ -75,7 +76,7 @@ public:
|
||||
kNo_Hinting = 0,
|
||||
kSlight_Hinting = 1,
|
||||
kNormal_Hinting = 2, //!< this is the default
|
||||
kFull_Hinting = 3
|
||||
kFull_Hinting = 3,
|
||||
};
|
||||
|
||||
Hinting getHinting() const {
|
||||
@ -100,11 +101,12 @@ public:
|
||||
kEmbeddedBitmapText_Flag = 0x400, //!< mask to enable embedded bitmap strikes
|
||||
kAutoHinting_Flag = 0x800, //!< mask to force Freetype's autohinter
|
||||
kVerticalText_Flag = 0x1000,
|
||||
kGenA8FromLCD_Flag = 0x2000, // hack for GDI -- do not use if you can help it
|
||||
|
||||
// when adding extra flags, note that the fFlags member is specified
|
||||
// with a bit-width and you'll have to expand it.
|
||||
|
||||
kAllFlags = 0x1FFF
|
||||
kAllFlags = 0x3FFF
|
||||
};
|
||||
|
||||
/** Return the paint's flags. Use the Flag enum to test flag values.
|
||||
@ -287,7 +289,7 @@ public:
|
||||
kStroke_Style, //!< stroke the geometry
|
||||
kStrokeAndFill_Style, //!< fill and stroke the geometry
|
||||
|
||||
kStyleCount
|
||||
kStyleCount,
|
||||
};
|
||||
|
||||
/** Return the paint's style, used for controlling how primitives'
|
||||
@ -432,44 +434,6 @@ public:
|
||||
*/
|
||||
bool getFillPath(const SkPath& src, SkPath* dst) const;
|
||||
|
||||
/** Returns true if the current paint settings allow for fast computation of
|
||||
bounds (i.e. there is nothing complex like a patheffect that would make
|
||||
the bounds computation expensive).
|
||||
*/
|
||||
bool canComputeFastBounds() const {
|
||||
// use bit-or since no need for early exit
|
||||
return (reinterpret_cast<uintptr_t>(this->getMaskFilter()) |
|
||||
reinterpret_cast<uintptr_t>(this->getLooper()) |
|
||||
reinterpret_cast<uintptr_t>(this->getRasterizer()) |
|
||||
reinterpret_cast<uintptr_t>(this->getPathEffect())) == 0;
|
||||
}
|
||||
|
||||
/** Only call this if canComputeFastBounds() returned true. This takes a
|
||||
raw rectangle (the raw bounds of a shape), and adjusts it for stylistic
|
||||
effects in the paint (e.g. stroking). If needed, it uses the storage
|
||||
rect parameter. It returns the adjusted bounds that can then be used
|
||||
for quickReject tests.
|
||||
|
||||
The returned rect will either be orig or storage, thus the caller
|
||||
should not rely on storage being set to the result, but should always
|
||||
use the returned value. It is legal for orig and storage to be the same
|
||||
rect.
|
||||
|
||||
e.g.
|
||||
if (paint.canComputeFastBounds()) {
|
||||
SkRect r, storage;
|
||||
path.computeBounds(&r, SkPath::kFast_BoundsType);
|
||||
const SkRect& fastR = paint.computeFastBounds(r, &storage);
|
||||
if (canvas->quickReject(fastR, ...)) {
|
||||
// don't draw the path
|
||||
}
|
||||
}
|
||||
*/
|
||||
const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const {
|
||||
return this->getStyle() == kFill_Style ? orig :
|
||||
this->computeStrokeFastBounds(orig, storage);
|
||||
}
|
||||
|
||||
/** Get the paint's shader object.
|
||||
<p />
|
||||
The shader's reference count is not affected.
|
||||
@ -478,14 +442,21 @@ public:
|
||||
SkShader* getShader() const { return fShader; }
|
||||
|
||||
/** Set or clear the shader object.
|
||||
<p />
|
||||
Pass NULL to clear any previous shader.
|
||||
As a convenience, the parameter passed is also returned.
|
||||
If a previous shader exists, its reference count is decremented.
|
||||
If shader is not NULL, its reference count is incremented.
|
||||
@param shader May be NULL. The shader to be installed in the paint
|
||||
@return shader
|
||||
*/
|
||||
* Shaders specify the source color(s) for what is being drawn. If a paint
|
||||
* has no shader, then the paint's color is used. If the paint has a
|
||||
* shader, then the shader's color(s) are use instead, but they are
|
||||
* modulated by the paint's alpha. This makes it easy to create a shader
|
||||
* once (e.g. bitmap tiling or gradient) and then change its transparency
|
||||
* w/o having to modify the original shader... only the paint's alpha needs
|
||||
* to be modified.
|
||||
* <p />
|
||||
* Pass NULL to clear any previous shader.
|
||||
* As a convenience, the parameter passed is also returned.
|
||||
* If a previous shader exists, its reference count is decremented.
|
||||
* If shader is not NULL, its reference count is incremented.
|
||||
* @param shader May be NULL. The shader to be installed in the paint
|
||||
* @return shader
|
||||
*/
|
||||
SkShader* setShader(SkShader* shader);
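A hedged sketch of the reuse pattern described above (makeTilingShader() is a hypothetical helper returning a new SkShader*):

    SkPaint paint;
    paint.setShader(makeTilingShader())->unref();  // paint keeps its own ref
    paint.setAlpha(0xFF);   // draw the shader fully opaque
    // ... canvas->drawPaint(paint) ...
    paint.setAlpha(0x40);   // same shader, now modulated to ~25% opacity
    // ... canvas->drawPaint(paint) ...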
|
||||
|
||||
/** Get the paint's colorfilter. If there is a colorfilter, its reference
|
||||
@ -690,6 +661,7 @@ public:
|
||||
enum TextEncoding {
|
||||
kUTF8_TextEncoding, //!< the text parameters are UTF8
|
||||
kUTF16_TextEncoding, //!< the text parameters are UTF16
|
||||
kUTF32_TextEncoding, //!< the text parameters are UTF32
|
||||
kGlyphID_TextEncoding //!< the text parameters are glyph indices
|
||||
};
|
||||
|
||||
@ -780,7 +752,7 @@ public:
|
||||
*
|
||||
* @param text Address of the text
|
||||
* @param length Number of bytes of text to measure
|
||||
* @return The width of the text
|
||||
* @return The advance width of the text
|
||||
*/
|
||||
SkScalar measureText(const void* text, size_t length) const {
|
||||
return this->measureText(text, length, NULL, 0);
|
||||
@ -841,20 +813,82 @@ public:
|
||||
void getTextPath(const void* text, size_t length, SkScalar x, SkScalar y,
|
||||
SkPath* path) const;
|
||||
|
||||
void getPosTextPath(const void* text, size_t length,
|
||||
const SkPoint pos[], SkPath* path) const;
|
||||
|
||||
#ifdef SK_BUILD_FOR_ANDROID
|
||||
const SkGlyph& getUnicharMetrics(SkUnichar);
|
||||
const SkGlyph& getGlyphMetrics(uint16_t);
|
||||
const void* findImage(const SkGlyph&);
|
||||
|
||||
uint32_t getGenerationID() const;
|
||||
|
||||
/** Returns the base glyph count for the strike associated with this paint
|
||||
*/
|
||||
unsigned getBaseGlyphCount(SkUnichar text) const;
|
||||
#endif
|
||||
|
||||
// returns true if the paint's settings (e.g. xfermode + alpha) resolve to
|
||||
// mean that we need not draw at all (e.g. SrcOver + 0-alpha)
|
||||
bool nothingToDraw() const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// would prefer to make these private...
|
||||
|
||||
/** Returns true if the current paint settings allow for fast computation of
|
||||
bounds (i.e. there is nothing complex like a patheffect that would make
|
||||
the bounds computation expensive).
|
||||
*/
|
||||
bool canComputeFastBounds() const {
|
||||
if (this->getLooper()) {
|
||||
return this->getLooper()->canComputeFastBounds(*this);
|
||||
}
|
||||
return !this->getRasterizer();
|
||||
}
|
||||
|
||||
/** Only call this if canComputeFastBounds() returned true. This takes a
|
||||
raw rectangle (the raw bounds of a shape), and adjusts it for stylistic
|
||||
effects in the paint (e.g. stroking). If needed, it uses the storage
|
||||
rect parameter. It returns the adjusted bounds that can then be used
|
||||
for quickReject tests.
|
||||
|
||||
The returned rect will either be orig or storage, thus the caller
|
||||
should not rely on storage being set to the result, but should always
|
||||
use the returned value. It is legal for orig and storage to be the same
|
||||
rect.
|
||||
|
||||
e.g.
|
||||
if (paint.canComputeFastBounds()) {
|
||||
SkRect r, storage;
|
||||
path.computeBounds(&r, SkPath::kFast_BoundsType);
|
||||
const SkRect& fastR = paint.computeFastBounds(r, &storage);
|
||||
if (canvas->quickReject(fastR, ...)) {
|
||||
// don't draw the path
|
||||
}
|
||||
}
|
||||
*/
|
||||
const SkRect& computeFastBounds(const SkRect& orig, SkRect* storage) const {
|
||||
SkPaint::Style style = this->getStyle();
|
||||
// ultra fast-case: filling with no effects that affect geometry
|
||||
if (kFill_Style == style) {
|
||||
uintptr_t effects = reinterpret_cast<uintptr_t>(this->getLooper());
|
||||
effects |= reinterpret_cast<uintptr_t>(this->getMaskFilter());
|
||||
effects |= reinterpret_cast<uintptr_t>(this->getPathEffect());
|
||||
if (!effects) {
|
||||
return orig;
|
||||
}
|
||||
}
|
||||
|
||||
return this->doComputeFastBounds(orig, storage, style);
|
||||
}
|
||||
|
||||
const SkRect& computeFastStrokeBounds(const SkRect& orig,
|
||||
SkRect* storage) const {
|
||||
return this->doComputeFastBounds(orig, storage, kStroke_Style);
|
||||
}
|
||||
|
||||
// Take the style explicitly, so the caller can force us to be stroked
|
||||
// without having to make a copy of the paint just to change that field.
|
||||
const SkRect& doComputeFastBounds(const SkRect& orig, SkRect* storage,
|
||||
Style) const;
|
||||
|
||||
private:
|
||||
SkTypeface* fTypeface;
|
||||
SkScalar fTextSize;
|
||||
@ -873,7 +907,7 @@ private:
|
||||
SkColor fColor;
|
||||
SkScalar fWidth;
|
||||
SkScalar fMiterLimit;
|
||||
unsigned fFlags : 14;
|
||||
unsigned fFlags : 15;
|
||||
unsigned fTextAlign : 2;
|
||||
unsigned fCapType : 2;
|
||||
unsigned fJoinType : 2;
|
||||
@ -894,9 +928,6 @@ private:
|
||||
void (*proc)(const SkDescriptor*, void*),
|
||||
void* context, bool ignoreGamma = false) const;
|
||||
|
||||
const SkRect& computeStrokeFastBounds(const SkRect& orig,
|
||||
SkRect* storage) const;
|
||||
|
||||
enum {
|
||||
kCanonicalTextSizeForPaths = 64
|
||||
};
|
||||
@ -913,44 +944,5 @@ private:
|
||||
#endif
|
||||
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "SkPathEffect.h"
|
||||
|
||||
/** \class SkStrokePathEffect
|
||||
|
||||
SkStrokePathEffect simulates stroking inside a patheffect, allowing the
|
||||
caller to have explicit control of when to stroke a path. Typically this is
|
||||
used if the caller wants to stroke before another patheffect is applied
|
||||
(using SkComposePathEffect or SkSumPathEffect).
|
||||
*/
|
||||
class SkStrokePathEffect : public SkPathEffect {
|
||||
public:
|
||||
SkStrokePathEffect(const SkPaint&);
|
||||
SkStrokePathEffect(SkScalar width, SkPaint::Style, SkPaint::Join,
|
||||
SkPaint::Cap, SkScalar miterLimit = -1);
|
||||
|
||||
// overrides
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width);
|
||||
|
||||
// overrides for SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
virtual Factory getFactory();
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
private:
|
||||
SkScalar fWidth, fMiter;
|
||||
uint8_t fStyle, fJoin, fCap;
|
||||
|
||||
SkStrokePathEffect(SkFlattenableReadBuffer&);
|
||||
|
||||
typedef SkPathEffect INHERITED;
|
||||
|
||||
// illegal
|
||||
SkStrokePathEffect(const SkStrokePathEffect&);
|
||||
SkStrokePathEffect& operator=(const SkStrokePathEffect&);
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -161,6 +161,18 @@ public:
|
||||
this->setConvexity(isConvex ? kConvex_Convexity : kConcave_Convexity);
|
||||
}
|
||||
|
||||
/** Returns true if the path is an oval.
|
||||
*
|
||||
* @param rect returns the bounding rect of this oval. It's a circle
|
||||
* if the height and width are the same.
|
||||
*
|
||||
* @return true if this path is an oval.
|
||||
* Tracking whether a path is an oval is considered an
|
||||
* optimization for performance and so some paths that are in
|
||||
* fact ovals can report false.
|
||||
*/
|
||||
bool isOval(SkRect* rect) const;
|
||||
|
||||
/** Clear any lines and curves from the path, making it empty. This frees up
|
||||
internal storage associated with those segments.
|
||||
This does NOT change the fill-type setting nor isConvex
|
||||
@ -185,7 +197,7 @@ public:
|
||||
@return true if the line is of zero length; otherwise false.
|
||||
*/
|
||||
static bool IsLineDegenerate(const SkPoint& p1, const SkPoint& p2) {
|
||||
return p1.equalsWithinTolerance(p2, SK_ScalarNearlyZero);
|
||||
return p1.equalsWithinTolerance(p2);
|
||||
}
|
||||
|
||||
/** Test a quad for zero length
|
||||
@ -194,8 +206,8 @@ public:
|
||||
*/
|
||||
static bool IsQuadDegenerate(const SkPoint& p1, const SkPoint& p2,
|
||||
const SkPoint& p3) {
|
||||
return p1.equalsWithinTolerance(p2, SK_ScalarNearlyZero) &&
|
||||
p2.equalsWithinTolerance(p3, SK_ScalarNearlyZero);
|
||||
return p1.equalsWithinTolerance(p2) &&
|
||||
p2.equalsWithinTolerance(p3);
|
||||
}
|
||||
|
||||
/** Test a cubic curve for zero length
|
||||
@ -204,11 +216,19 @@ public:
|
||||
*/
|
||||
static bool IsCubicDegenerate(const SkPoint& p1, const SkPoint& p2,
|
||||
const SkPoint& p3, const SkPoint& p4) {
|
||||
return p1.equalsWithinTolerance(p2, SK_ScalarNearlyZero) &&
|
||||
p2.equalsWithinTolerance(p3, SK_ScalarNearlyZero) &&
|
||||
p3.equalsWithinTolerance(p4, SK_ScalarNearlyZero);
|
||||
return p1.equalsWithinTolerance(p2) &&
|
||||
p2.equalsWithinTolerance(p3) &&
|
||||
p3.equalsWithinTolerance(p4);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the path specifies a single line (i.e. it contains just
|
||||
* a moveTo and a lineTo). If so, and line[] is not null, it sets the 2
|
||||
* points in line[] to the end-points of the line. If the path is not a
|
||||
* line, returns false and ignores line[].
|
||||
*/
|
||||
bool isLine(SkPoint line[2]) const;
|
||||
|
||||
/** Returns true if the path specifies a rectangle. If so, and if rect is
|
||||
not null, set rect to the bounds of the path. If the path does not
|
||||
specify a rectangle, return false and ignore rect.
|
||||
@ -453,6 +473,24 @@ public:
|
||||
kCCW_Direction
|
||||
};
|
||||
|
||||
/**
|
||||
* Tries to quickly compute the direction of the first non-degenerate
|
||||
* contour. If it can be computed, return true and set dir to that
|
||||
* direction. If it cannot be (quickly) determined, return false and ignore
|
||||
* the dir parameter.
|
||||
*/
|
||||
bool cheapComputeDirection(Direction* dir) const;
|
||||
|
||||
/**
|
||||
* Returns true if the path's direction can be computed via
|
||||
     * cheapComputeDirection() and if that computed direction matches the
|
||||
* specified direction.
|
||||
*/
|
||||
bool cheapIsDirection(Direction dir) const {
|
||||
Direction computedDir;
|
||||
return this->cheapComputeDirection(&computedDir) && computedDir == dir;
|
||||
}
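A hedged sketch of the direction queries above:

    SkPath path;
    path.addRect(SkRect::MakeWH(SkIntToScalar(10), SkIntToScalar(20)),
                 SkPath::kCW_Direction);
    SkPath::Direction dir;
    if (path.cheapComputeDirection(&dir)) {
        // dir == SkPath::kCW_Direction for the rectangle added above
    }
    bool isCW = path.cheapIsDirection(SkPath::kCW_Direction);   // true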
|
||||
|
||||
/** Add a closed rectangle contour to the path
|
||||
@param rect The rectangle to add as a closed contour to the path
|
||||
@param dir The direction to wind the rectangle's contour
|
||||
@ -526,7 +564,7 @@ public:
|
||||
@param dx The amount to translate the path in X as it is added
|
||||
@param dy The amount to translate the path in Y as it is added
|
||||
*/
|
||||
void addPath(const SkPath& src, SkScalar dx, SkScalar dy);
|
||||
void addPath(const SkPath& src, SkScalar dx, SkScalar dy);
|
||||
|
||||
/** Add a copy of src to the path
|
||||
*/
|
||||
@ -541,6 +579,11 @@ public:
|
||||
*/
|
||||
void addPath(const SkPath& src, const SkMatrix& matrix);
|
||||
|
||||
/**
|
||||
* Same as addPath(), but reverses the src input
|
||||
*/
|
||||
void reverseAddPath(const SkPath& src);
|
||||
|
||||
/** Offset the path by (dx,dy), returning true on success
|
||||
|
||||
@param dx The amount in the X direction to offset the entire path
|
||||
@ -641,9 +684,16 @@ public:
|
||||
segments have been visited, return kDone_Verb.
|
||||
|
||||
@param pts The points representing the current verb and/or segment
|
||||
@param doConsumeDegerates If true, first scan for segments that are
|
||||
deemed degenerate (too short) and skip those.
|
||||
@return The verb for the current segment
|
||||
*/
|
||||
Verb next(SkPoint pts[4]);
|
||||
Verb next(SkPoint pts[4], bool doConsumeDegerates = true) {
|
||||
if (doConsumeDegerates) {
|
||||
this->consumeDegenerateSegments();
|
||||
}
|
||||
return this->doNext(pts);
|
||||
}
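A hedged sketch of driving the iterator (path is an assumed SkPath; degenerate segments are skipped by default, as described above):

    SkPath::Iter iter(path, false);   // false: do not force-close contours
    SkPoint pts[4];
    SkPath::Verb verb;
    while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
        if (SkPath::kLine_Verb == verb) {
            // pts[0] -> pts[1] is a (non-degenerate) line segment
        }
    }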
|
||||
|
||||
/** If next() returns kLine_Verb, then this query returns true if the
|
||||
line was the result of a close() command (i.e. the end point is the
|
||||
@ -671,9 +721,10 @@ public:
|
||||
SkBool8 fCloseLine;
|
||||
SkBool8 fSegmentState;
|
||||
|
||||
bool cons_moveTo(SkPoint pts[1]);
|
||||
inline const SkPoint& cons_moveTo();
|
||||
Verb autoClose(SkPoint pts[2]);
|
||||
void consumeDegenerateSegments();
|
||||
Verb doNext(SkPoint pts[4]);
|
||||
};
|
||||
|
||||
/** Iterate through the verbs in the path, providing the associated points.
|
||||
@ -709,6 +760,8 @@ public:
|
||||
|
||||
#ifdef SK_BUILD_FOR_ANDROID
|
||||
uint32_t getGenerationID() const;
|
||||
const SkPath* getSourcePath() const;
|
||||
void setSourcePath(const SkPath* path);
|
||||
#endif
|
||||
|
||||
SkDEBUGCODE(void validate() const;)
|
||||
@ -717,19 +770,22 @@ private:
|
||||
SkTDArray<SkPoint> fPts;
|
||||
SkTDArray<uint8_t> fVerbs;
|
||||
mutable SkRect fBounds;
|
||||
int fLastMoveToIndex;
|
||||
uint8_t fFillType;
|
||||
uint8_t fSegmentMask;
|
||||
mutable uint8_t fBoundsIsDirty;
|
||||
mutable uint8_t fConvexity;
|
||||
|
||||
mutable SkBool8 fIsOval;
|
||||
#ifdef SK_BUILD_FOR_ANDROID
|
||||
uint32_t fGenerationID;
|
||||
const SkPath* fSourcePath;
|
||||
#endif
|
||||
|
||||
// called, if dirty, by getBounds()
|
||||
void computeBounds() const;
|
||||
|
||||
friend class Iter;
|
||||
void cons_moveto();
|
||||
|
||||
friend class SkPathStroker;
|
||||
/* Append the first contour of path, ignoring path's initial point. If no
|
||||
@ -744,8 +800,18 @@ private:
|
||||
*/
|
||||
void reversePathTo(const SkPath&);
|
||||
|
||||
friend const SkPoint* sk_get_path_points(const SkPath&, int index);
|
||||
// called before we add points for lineTo, quadTo, cubicTo, checking to see
|
||||
// if we need to inject a leading moveTo first
|
||||
//
|
||||
// SkPath path; path.lineTo(...); <--- need a leading moveTo(0, 0)
|
||||
// SkPath path; ... path.close(); path.lineTo(...) <-- need a moveTo(previous moveTo)
|
||||
//
|
||||
inline void injectMoveToIfNeeded();
|
||||
|
||||
inline bool hasOnlyMoveTos() const;
|
||||
|
||||
friend class SkAutoPathBoundsUpdate;
|
||||
friend class SkAutoDisableOvalCheck;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -34,11 +34,21 @@ public:
|
||||
*/
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width) = 0;
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
/**
|
||||
* Compute a conservative bounds for its effect, given the src bounds.
|
||||
* The baseline implementation just assigns src to dst.
|
||||
*/
|
||||
virtual void computeFastBounds(SkRect* dst, const SkRect& src);
|
||||
|
||||
protected:
|
||||
SkPathEffect(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
private:
|
||||
// illegal
|
||||
SkPathEffect(const SkPathEffect&);
|
||||
SkPathEffect& operator=(const SkPathEffect&);
|
||||
|
||||
typedef SkFlattenable INHERITED;
|
||||
};
|
||||
|
||||
/** \class SkPairPathEffect
|
||||
@ -54,7 +64,8 @@ public:
|
||||
|
||||
protected:
|
||||
SkPairPathEffect(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// these are visible to our subclasses
|
||||
SkPathEffect* fPE0, *fPE1;
|
||||
|
||||
@ -81,16 +92,12 @@ public:
|
||||
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkComposePathEffect, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposePathEffect)
|
||||
|
||||
protected:
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
|
||||
private:
|
||||
SkComposePathEffect(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
private:
|
||||
// illegal
|
||||
SkComposePathEffect(const SkComposePathEffect&);
|
||||
SkComposePathEffect& operator=(const SkComposePathEffect&);
|
||||
@ -116,16 +123,12 @@ public:
|
||||
// overrides
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkSumPathEffect, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkSumPathEffect)
|
||||
|
||||
protected:
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
|
||||
private:
|
||||
SkSumPathEffect(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
private:
|
||||
// illegal
|
||||
SkSumPathEffect(const SkSumPathEffect&);
|
||||
SkSumPathEffect& operator=(const SkSumPathEffect&);
|
||||
|
@ -29,7 +29,7 @@ public:
|
||||
a different path (or null), since the measure object keeps a pointer to the
|
||||
path object (does not copy its data).
|
||||
*/
|
||||
void setPath(const SkPath*, bool forceClosed);
|
||||
void setPath(const SkPath*, bool forceClosed);
|
||||
|
||||
/** Return the total length of the current contour, or 0 if no path
|
||||
is associated (e.g. resetPath(null))
|
||||
@ -41,19 +41,23 @@ public:
|
||||
Returns false if there is no path, or a zero-length path was specified, in which case
|
||||
position and tangent are unchanged.
|
||||
*/
|
||||
bool getPosTan(SkScalar distance, SkPoint* position, SkVector* tangent);
|
||||
bool SK_WARN_UNUSED_RESULT getPosTan(SkScalar distance, SkPoint* position,
|
||||
SkVector* tangent);
|
||||
|
||||
enum MatrixFlags {
|
||||
kGetPosition_MatrixFlag = 0x01,
|
||||
kGetTangent_MatrixFlag = 0x02,
|
||||
kGetPosAndTan_MatrixFlag = kGetPosition_MatrixFlag | kGetTangent_MatrixFlag
|
||||
};
|
||||
|
||||
/** Pins distance to 0 <= distance <= getLength(), and then computes
|
||||
the corresponding matrix (by calling getPosTan).
|
||||
Returns false if there is no path, or a zero-length path was specified, in which case
|
||||
matrix is unchanged.
|
||||
*/
|
||||
bool getMatrix(SkScalar distance, SkMatrix* matrix, MatrixFlags flags = kGetPosAndTan_MatrixFlag);
|
||||
bool SK_WARN_UNUSED_RESULT getMatrix(SkScalar distance, SkMatrix* matrix,
|
||||
MatrixFlags flags = kGetPosAndTan_MatrixFlag);
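A hedged sketch of sampling along a contour with the calls above (path is an assumed SkPath; float scalars assumed for the arithmetic):

    SkPathMeasure measure(path, false);   // false: do not force-close
    SkScalar length = measure.getLength();
    for (int i = 0; i <= 8; ++i) {
        SkPoint pos;
        SkVector tan;
        if (measure.getPosTan(length * i / 8, &pos, &tan)) {
            // pos is a point on the contour, tan is the unit tangent there
        }
    }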
|
||||
|
||||
/** Given a start and stop distance, return in dst the intervening segment(s).
|
||||
If the segment is zero-length, return false, else return true.
|
||||
startD and stopD are pinned to legal values (0..getLength()). If startD <= stopD
|
||||
@ -85,13 +89,14 @@ private:
|
||||
|
||||
struct Segment {
|
||||
SkScalar fDistance; // total distance up to this point
|
||||
unsigned fPtIndex : 15;
|
||||
unsigned fPtIndex : 15; // index into the fPts array
|
||||
unsigned fTValue : 15;
|
||||
unsigned fType : 2;
|
||||
|
||||
SkScalar getScalarT() const;
|
||||
};
|
||||
SkTDArray<Segment> fSegments;
|
||||
SkTDArray<SkPoint> fPts; // Points used to define the segments
|
||||
|
||||
static const Segment* NextSegment(const Segment*);
|
||||
|
||||
@ -104,4 +109,3 @@ private:
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -13,35 +13,15 @@
|
||||
#include "SkBitmap.h"
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkString.h"
|
||||
#include "SkFlattenable.h"
|
||||
|
||||
class SkColorTable;
|
||||
struct SkIRect;
|
||||
class SkMutex;
|
||||
class SkFlattenableReadBuffer;
|
||||
class SkFlattenableWriteBuffer;
|
||||
|
||||
// this is an opaque class, not interpreted by skia
|
||||
class SkGpuTexture;
|
||||
|
||||
#if SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
|
||||
|
||||
#define SK_DECLARE_PIXEL_REF_REGISTRAR()
|
||||
|
||||
#define SK_DEFINE_PIXEL_REF_REGISTRAR(pixelRef) \
|
||||
static SkPixelRef::Registrar g##pixelRef##Reg(#pixelRef, \
|
||||
pixelRef::Create);
|
||||
|
||||
#else
|
||||
|
||||
#define SK_DECLARE_PIXEL_REF_REGISTRAR() static void Init();
|
||||
|
||||
#define SK_DEFINE_PIXEL_REF_REGISTRAR(pixelRef) \
|
||||
void pixelRef::Init() { \
|
||||
SkPixelRef::Registrar(#pixelRef, Create); \
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/** \class SkPixelRef
|
||||
|
||||
This class is the smart container for pixel memory, and is used with
|
||||
@ -50,9 +30,9 @@ class SkGpuTexture;
|
||||
|
||||
This class can be shared/accessed between multiple threads.
|
||||
*/
|
||||
class SkPixelRef : public SkRefCnt {
|
||||
class SK_API SkPixelRef : public SkFlattenable {
|
||||
public:
|
||||
explicit SkPixelRef(SkMutex* mutex = NULL);
|
||||
explicit SkPixelRef(SkBaseMutex* mutex = NULL);
|
||||
|
||||
/** Return the pixel memory returned from lockPixels, or null if the
|
||||
lockCount is 0.
|
||||
@ -63,9 +43,10 @@ public:
|
||||
*/
|
||||
SkColorTable* colorTable() const { return fColorTable; }
|
||||
|
||||
/** Return the current lockcount (defaults to 0)
|
||||
*/
|
||||
int getLockCount() const { return fLockCount; }
|
||||
/**
|
||||
* Returns true if the lockcount > 0
|
||||
*/
|
||||
bool isLocked() const { return fLockCount > 0; }
|
||||
|
||||
/** Call to access the pixel memory, which is returned. Balance with a call
|
||||
to unlockPixels().
|
||||
@ -142,13 +123,6 @@ public:
|
||||
support deep copies. */
|
||||
virtual SkPixelRef* deepCopy(SkBitmap::Config config) { return NULL; }
|
||||
|
||||
// serialization
|
||||
|
||||
typedef SkPixelRef* (*Factory)(SkFlattenableReadBuffer&);
|
||||
|
||||
virtual Factory getFactory() const { return NULL; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const;
|
||||
|
||||
#ifdef SK_BUILD_FOR_ANDROID
|
||||
/**
|
||||
* Acquire a "global" ref on this object.
|
||||
@ -165,17 +139,6 @@ public:
|
||||
virtual void globalUnref();
|
||||
#endif
|
||||
|
||||
static Factory NameToFactory(const char name[]);
|
||||
static const char* FactoryToName(Factory);
|
||||
static void Register(const char name[], Factory);
|
||||
|
||||
class Registrar {
|
||||
public:
|
||||
Registrar(const char name[], Factory factory) {
|
||||
SkPixelRef::Register(name, factory);
|
||||
}
|
||||
};
|
||||
|
||||
protected:
|
||||
/** Called when the lockCount goes from 0 to 1. The caller will have already
|
||||
acquired a mutex for thread safety, so this method need not do that.
|
||||
@ -201,16 +164,27 @@ protected:
|
||||
/** Return the mutex associated with this pixelref. This value is assigned
|
||||
in the constructor, and cannot change during the lifetime of the object.
|
||||
*/
|
||||
SkMutex* mutex() const { return fMutex; }
|
||||
SkBaseMutex* mutex() const { return fMutex; }
|
||||
|
||||
SkPixelRef(SkFlattenableReadBuffer&, SkMutex*);
|
||||
// serialization
|
||||
SkPixelRef(SkFlattenableReadBuffer&, SkBaseMutex*);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// only call from constructor. Flags this to always be locked, removing
|
||||
// the need to grab the mutex and call onLockPixels/onUnlockPixels.
|
||||
// Performance tweak to avoid those calls (esp. in multi-thread use case).
|
||||
void setPreLocked(void* pixels, SkColorTable* ctable);
|
||||
|
||||
/**
|
||||
* If a subclass passed a particular mutex to the base constructor, it can
|
||||
* override that to go back to the default mutex by calling this. However,
|
||||
* this should only be called from within the subclass' constructor.
|
||||
*/
|
||||
void useDefaultMutex() { this->setMutex(NULL); }
|
||||
|
||||
private:
|
||||
#if !SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
|
||||
static void InitializeFlattenables();
|
||||
#endif
|
||||
|
||||
SkMutex* fMutex; // must remain in scope for the life of this object
|
||||
SkBaseMutex* fMutex; // must remain in scope for the life of this object
|
||||
void* fPixels;
|
||||
SkColorTable* fColorTable; // we do not track ownership, subclass does
|
||||
int fLockCount;
|
||||
@ -221,8 +195,12 @@ private:
|
||||
|
||||
// can go from false to true, but never from true to false
|
||||
bool fIsImmutable;
|
||||
// only ever set in constructor, const after that
|
||||
bool fPreLocked;
|
||||
|
||||
friend class SkGraphics;
|
||||
void setMutex(SkBaseMutex* mutex);
|
||||
|
||||
typedef SkFlattenable INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -213,7 +213,13 @@ struct SK_API SkPoint {
|
||||
* Return true if the computed length of the vector is >= the internal
|
||||
* tolerance (used to avoid dividing by tiny values).
|
||||
*/
|
||||
static bool CanNormalize(SkScalar dx, SkScalar dy);
|
||||
static bool CanNormalize(SkScalar dx, SkScalar dy)
|
||||
#ifdef SK_SCALAR_IS_FLOAT
|
||||
// Simple enough (and performance critical sometimes) so we inline it.
|
||||
{ return (dx*dx + dy*dy) > (SK_ScalarNearlyZero * SK_ScalarNearlyZero); }
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
|
||||
bool canNormalize() const {
|
||||
return CanNormalize(fX, fY);
|
||||
@ -315,11 +321,29 @@ struct SK_API SkPoint {
|
||||
return a.fX != b.fX || a.fY != b.fY;
|
||||
}
|
||||
|
||||
/** Return true if this and the given point are componentwise within tol.
|
||||
/** Return true if this point and the given point are close enough together
|
||||
that a vector between them would be degenerate (essentially zero length).
|
||||
|
||||
WARNING: Unlike the deprecated version of equalsWithinTolerance(),
|
||||
this method does not use componentwise comparison. Instead, it
|
||||
uses a comparison designed to match judgments elsewhere regarding
|
||||
degeneracy ("points A and B are so close that the vector between them
|
||||
is essentially zero").
|
||||
*/
|
||||
bool equalsWithinTolerance(const SkPoint& v, SkScalar tol) const {
|
||||
return SkScalarNearlyZero(fX - v.fX, tol)
|
||||
&& SkScalarNearlyZero(fY - v.fY, tol);
|
||||
bool equalsWithinTolerance(const SkPoint& p) const {
|
||||
return !CanNormalize(fX - p.fX, fY - p.fY);
|
||||
}
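A hedged illustration of the degeneracy test above, assuming float scalars:

    SkPoint a = SkPoint::Make(10, 10);
    SkPoint b = SkPoint::Make(10 + SK_ScalarNearlyZero / 2, 10);
    bool nearlyEqual = a.equalsWithinTolerance(b);                      // true: too close to normalize
    bool alsoEqual   = a.equalsWithinTolerance(SkPoint::Make(11, 10));  // false: clearly distinct points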
|
||||
|
||||
/** DEPRECATED: Return true if this and the given point are componentwise
|
||||
within tolerance "tol".
|
||||
|
||||
WARNING: There is no guarantee that the result will reflect judgments
|
||||
elsewhere regarding degeneracy ("points A and B are so close that the
|
||||
vector between them is essentially zero").
|
||||
*/
|
||||
bool equalsWithinTolerance(const SkPoint& p, SkScalar tol) const {
|
||||
return SkScalarNearlyZero(fX - p.fX, tol)
|
||||
&& SkScalarNearlyZero(fY - p.fY, tol);
|
||||
}
|
||||
|
||||
/** Returns a new point whose coordinates are the difference between
|
||||
@ -442,11 +466,11 @@ struct SK_API SkPoint {
|
||||
void setOrthog(const SkPoint& vec, Side side = kLeft_Side) {
|
||||
// vec could be this
|
||||
SkScalar tmp = vec.fX;
|
||||
if (kLeft_Side == side) {
|
||||
if (kRight_Side == side) {
|
||||
fX = -vec.fY;
|
||||
fY = tmp;
|
||||
} else {
|
||||
SkASSERT(kRight_Side == side);
|
||||
SkASSERT(kLeft_Side == side);
|
||||
fX = vec.fY;
|
||||
fY = -tmp;
|
||||
}
|
||||
|
@ -82,6 +82,12 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(SK_ZLIB_INCLUDE) && defined(SK_SYSTEM_ZLIB)
|
||||
#error "cannot define both SK_ZLIB_INCLUDE and SK_SYSTEM_ZLIB"
|
||||
#elif defined(SK_ZLIB_INCLUDE) || defined(SK_SYSTEM_ZLIB)
|
||||
#define SK_HAS_ZLIB
|
||||
#endif
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef SkNEW
|
||||
@ -282,18 +288,9 @@
|
||||
#if defined(_MSC_VER)
|
||||
#define SK_OVERRIDE override
|
||||
#elif defined(__clang__)
|
||||
#if __has_feature(cxx_override_control)
|
||||
// Some documentation suggests we should be using __attribute__((override)),
|
||||
// but it doesn't work.
|
||||
#define SK_OVERRIDE override
|
||||
#elif defined(__has_extension)
|
||||
#if __has_extension(cxx_override_control)
|
||||
#define SK_OVERRIDE override
|
||||
#endif
|
||||
#endif
|
||||
#ifndef SK_OVERRIDE
|
||||
#define SK_OVERRIDE
|
||||
#endif
|
||||
#else
|
||||
// Linux GCC ignores "__attribute__((override))" and rejects "override".
|
||||
#define SK_OVERRIDE
|
||||
@ -305,48 +302,3 @@
|
||||
#ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS
|
||||
#define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 1
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// ARM defines
|
||||
|
||||
#if defined(__GNUC__) && defined(__arm__)
|
||||
|
||||
# define SK_ARM_ARCH 3
|
||||
|
||||
# if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__) \
|
||||
|| defined(_ARM_ARCH_4)
|
||||
# undef SK_ARM_ARCH
|
||||
# define SK_ARM_ARCH 4
|
||||
# endif
|
||||
|
||||
# if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
|
||||
|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
|
||||
|| defined(__ARM_ARCH_5TEJ__) || defined(_ARM_ARCH_5)
|
||||
# undef SK_ARM_ARCH
|
||||
# define SK_ARM_ARCH 5
|
||||
# endif
|
||||
|
||||
# if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|
||||
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
|
||||
|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
|
||||
|| defined(__ARM_ARCH_6M__) || defined(_ARM_ARCH_6)
|
||||
# undef SK_ARM_ARCH
|
||||
# define SK_ARM_ARCH 6
|
||||
# endif
|
||||
|
||||
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|
||||
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|
||||
|| defined(__ARM_ARCH_7EM__) || defined(_ARM_ARCH_7)
|
||||
# undef SK_ARM_ARCH
|
||||
# define SK_ARM_ARCH 7
|
||||
# endif
|
||||
|
||||
# undef SK_ARM_HAS_EDSP
|
||||
# if defined(__thumb2__) && (SK_ARM_ARCH >= 6) \
|
||||
|| !defined(__thumb__) \
|
||||
&& ((SK_ARM_ARCH > 5) || defined(__ARM_ARCH_5E__) \
|
||||
|| defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__))
|
||||
# define SK_ARM_HAS_EDSP 1
|
||||
# endif
|
||||
|
||||
#endif
|
||||
|
@ -30,16 +30,15 @@
|
||||
#define SK_BUILD_FOR_WIN32
|
||||
#elif defined(__SYMBIAN32__)
|
||||
#define SK_BUILD_FOR_WIN32
|
||||
#elif defined(linux) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
|
||||
defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__) || \
|
||||
defined(__GLIBC__) || defined(__GNU__)
|
||||
#define SK_BUILD_FOR_UNIX
|
||||
#elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
|
||||
#define SK_BUILD_FOR_IOS
|
||||
#elif defined(ANDROID_NDK)
|
||||
#define SK_BUILD_FOR_ANDROID_NDK
|
||||
#elif defined(ANDROID)
|
||||
#define SK_BUILD_FOR_ANDROID
|
||||
#elif defined(linux) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
|
||||
defined(__sun) || defined(__NetBSD__) || defined(__DragonFly__)
|
||||
#define SK_BUILD_FOR_UNIX
|
||||
#elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
|
||||
#define SK_BUILD_FOR_IOS
|
||||
#else
|
||||
#define SK_BUILD_FOR_MAC
|
||||
#endif
|
||||
@ -69,6 +68,9 @@
|
||||
#if !defined(SK_RESTRICT)
|
||||
#define SK_RESTRICT __restrict
|
||||
#endif
|
||||
#if !defined(SK_WARN_UNUSED_RESULT)
|
||||
#define SK_WARN_UNUSED_RESULT
|
||||
#endif
|
||||
#include "sk_stdint.h"
|
||||
#endif
|
||||
|
||||
@ -78,6 +80,10 @@
|
||||
#define SK_RESTRICT __restrict__
|
||||
#endif
|
||||
|
||||
#if !defined(SK_WARN_UNUSED_RESULT)
|
||||
#define SK_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
#if !defined(SK_SCALAR_IS_FLOAT) && !defined(SK_SCALAR_IS_FIXED)
|
||||
|
@ -71,7 +71,7 @@ private:
|
||||
// is not related to its "index".
|
||||
SkTDArray<Pair> fList;
|
||||
|
||||
static int Cmp(const Pair& a, const Pair& b);
|
||||
static int Cmp(const Pair* a, const Pair* b);
|
||||
|
||||
typedef SkRefCnt INHERITED;
|
||||
};
|
||||
|
@ -28,9 +28,8 @@ public:
|
||||
const SkIRect* clipBounds, SkMaskFilter* filter,
|
||||
SkMask* mask, SkMask::CreateMode mode);
|
||||
|
||||
virtual void flatten(SkFlattenableWriteBuffer& ) SK_OVERRIDE {}
|
||||
protected:
|
||||
SkRasterizer(SkFlattenableReadBuffer&);
|
||||
SkRasterizer(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
virtual bool onRasterize(const SkPath& path, const SkMatrix& matrix,
|
||||
const SkIRect* clipBounds,
|
||||
|
@ -10,6 +10,8 @@
|
||||
#ifndef SkReader32_DEFINED
|
||||
#define SkReader32_DEFINED
|
||||
|
||||
#include "SkMatrix.h"
|
||||
#include "SkRegion.h"
|
||||
#include "SkScalar.h"
|
||||
|
||||
class SkString;
|
||||
@ -90,6 +92,18 @@ public:
|
||||
int32_t readS32() { return this->readInt(); }
|
||||
uint32_t readU32() { return this->readInt(); }
|
||||
|
||||
void readMatrix(SkMatrix* matrix) {
|
||||
size_t size = matrix->unflatten(this->peek());
|
||||
SkASSERT(SkAlign4(size) == size);
|
||||
(void)this->skip(size);
|
||||
}
|
||||
|
||||
void readRegion(SkRegion* rgn) {
|
||||
size_t size = rgn->unflatten(this->peek());
|
||||
SkASSERT(SkAlign4(size) == size);
|
||||
(void)this->skip(size);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the length of a string (written by SkWriter32::writeString) into
|
||||
     * len (if len is not NULL) and return the null-terminated address of the
|
||||
|
@ -20,31 +20,31 @@
|
||||
struct SK_API SkIRect {
|
||||
int32_t fLeft, fTop, fRight, fBottom;
|
||||
|
||||
static SkIRect MakeEmpty() {
|
||||
static SkIRect SK_WARN_UNUSED_RESULT MakeEmpty() {
|
||||
SkIRect r;
|
||||
r.setEmpty();
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkIRect MakeWH(int32_t w, int32_t h) {
|
||||
static SkIRect SK_WARN_UNUSED_RESULT MakeWH(int32_t w, int32_t h) {
|
||||
SkIRect r;
|
||||
r.set(0, 0, w, h);
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkIRect MakeSize(const SkISize& size) {
|
||||
static SkIRect SK_WARN_UNUSED_RESULT MakeSize(const SkISize& size) {
|
||||
SkIRect r;
|
||||
r.set(0, 0, size.width(), size.height());
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkIRect MakeLTRB(int32_t l, int32_t t, int32_t r, int32_t b) {
|
||||
static SkIRect SK_WARN_UNUSED_RESULT MakeLTRB(int32_t l, int32_t t, int32_t r, int32_t b) {
|
||||
SkIRect rect;
|
||||
rect.set(l, t, r, b);
|
||||
return rect;
|
||||
}
|
||||
|
||||
static SkIRect MakeXYWH(int32_t x, int32_t y, int32_t w, int32_t h) {
|
||||
static SkIRect SK_WARN_UNUSED_RESULT MakeXYWH(int32_t x, int32_t y, int32_t w, int32_t h) {
|
||||
SkIRect r;
|
||||
r.set(x, y, x + w, y + h);
|
||||
return r;
|
||||
@ -75,7 +75,7 @@ struct SK_API SkIRect {
|
||||
* Return true if the rectangle's width or height are <= 0
|
||||
*/
|
||||
bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
|
||||
|
||||
|
||||
friend bool operator==(const SkIRect& a, const SkIRect& b) {
|
||||
return !memcmp(&a, &b, sizeof(a));
|
||||
}
|
||||
@ -144,7 +144,7 @@ struct SK_API SkIRect {
|
||||
|
||||
/** Inset the rectangle by (dx,dy). If dx is positive, then the sides are moved inwards,
|
||||
making the rectangle narrower. If dx is negative, then the sides are moved outwards,
|
||||
making the rectangle wider. The same hods true for dy and the top and bottom.
|
||||
making the rectangle wider. The same holds true for dy and the top and bottom.
|
||||
*/
|
||||
void inset(int32_t dx, int32_t dy) {
|
||||
fLeft += dx;
|
||||
@ -153,6 +153,13 @@ struct SK_API SkIRect {
|
||||
fBottom -= dy;
|
||||
}
|
||||
|
||||
/** Outset the rectangle by (dx,dy). If dx is positive, then the sides are
|
||||
moved outwards, making the rectangle wider. If dx is negative, then the
|
||||
sides are moved inwards, making the rectangle narrower. The same holds
|
||||
true for dy and the top and bottom.
|
||||
*/
|
||||
void outset(int32_t dx, int32_t dy) { this->inset(-dx, -dy); }
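A small sketch of the two operations:

    SkIRect r = SkIRect::MakeLTRB(10, 10, 50, 50);
    r.inset(5, 5);    // now (15, 15, 45, 45): every side moved inwards
    r.outset(10, 10); // now (5, 5, 55, 55): every side moved outwards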
|
||||
|
||||
bool quickReject(int l, int t, int r, int b) const {
|
||||
return l >= fRight || fLeft >= r || t >= fBottom || fTop >= b;
|
||||
}
|
||||
@ -185,18 +192,18 @@ struct SK_API SkIRect {
|
||||
}
|
||||
|
||||
/** Return true if this rectangle contains the specified rectangle.
|
||||
For speed, this method does not check if either this or the specified
|
||||
rectangles are empty, and if either is, its return value is undefined.
|
||||
In the debugging build however, we assert that both this and the
|
||||
specified rectangles are non-empty.
|
||||
For speed, this method does not check if either this or the specified
|
||||
rectangles are empty, and if either is, its return value is undefined.
|
||||
In the debugging build however, we assert that both this and the
|
||||
specified rectangles are non-empty.
|
||||
*/
|
||||
bool containsNoEmptyCheck(int32_t left, int32_t top,
|
||||
int32_t right, int32_t bottom) const {
|
||||
SkASSERT(fLeft < fRight && fTop < fBottom);
|
||||
int32_t right, int32_t bottom) const {
|
||||
SkASSERT(fLeft < fRight && fTop < fBottom);
|
||||
SkASSERT(left < right && top < bottom);
|
||||
|
||||
return fLeft <= left && fTop <= top &&
|
||||
fRight >= right && fBottom >= bottom;
|
||||
fRight >= right && fBottom >= bottom;
|
||||
}
|
||||
|
||||
/** If r intersects this rectangle, return true and set this rectangle to that
|
||||
@ -294,7 +301,7 @@ struct SK_API SkIRect {
|
||||
*/
|
||||
void sort();
|
||||
|
||||
static const SkIRect& EmptyIRect() {
|
||||
static const SkIRect& SK_WARN_UNUSED_RESULT EmptyIRect() {
|
||||
static const SkIRect gEmpty = { 0, 0, 0, 0 };
|
||||
return gEmpty;
|
||||
}
|
||||
@ -305,31 +312,31 @@ struct SK_API SkIRect {
|
||||
struct SK_API SkRect {
|
||||
SkScalar fLeft, fTop, fRight, fBottom;
|
||||
|
||||
static SkRect MakeEmpty() {
|
||||
static SkRect SK_WARN_UNUSED_RESULT MakeEmpty() {
|
||||
SkRect r;
|
||||
r.setEmpty();
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkRect MakeWH(SkScalar w, SkScalar h) {
|
||||
static SkRect SK_WARN_UNUSED_RESULT MakeWH(SkScalar w, SkScalar h) {
|
||||
SkRect r;
|
||||
r.set(0, 0, w, h);
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkRect MakeSize(const SkSize& size) {
|
||||
static SkRect SK_WARN_UNUSED_RESULT MakeSize(const SkSize& size) {
|
||||
SkRect r;
|
||||
r.set(0, 0, size.width(), size.height());
|
||||
return r;
|
||||
}
|
||||
|
||||
static SkRect MakeLTRB(SkScalar l, SkScalar t, SkScalar r, SkScalar b) {
|
||||
static SkRect SK_WARN_UNUSED_RESULT MakeLTRB(SkScalar l, SkScalar t, SkScalar r, SkScalar b) {
|
||||
SkRect rect;
|
||||
rect.set(l, t, r, b);
|
||||
return rect;
|
||||
}
|
||||
|
||||
static SkRect MakeXYWH(SkScalar x, SkScalar y, SkScalar w, SkScalar h) {
|
||||
static SkRect SK_WARN_UNUSED_RESULT MakeXYWH(SkScalar x, SkScalar y, SkScalar w, SkScalar h) {
|
||||
SkRect r;
|
||||
r.set(x, y, x + w, y + h);
|
||||
return r;
|
||||
@ -347,13 +354,18 @@ struct SK_API SkRect {
|
||||
*/
|
||||
bool isFinite() const {
|
||||
#ifdef SK_SCALAR_IS_FLOAT
|
||||
// x * 0 will be NaN iff x is infinity or NaN.
|
||||
// a + b will be NaN iff either a or b is NaN.
|
||||
float value = fLeft * 0 + fTop * 0 + fRight * 0 + fBottom * 0;
|
||||
float accum = 0;
|
||||
accum *= fLeft;
|
||||
accum *= fTop;
|
||||
accum *= fRight;
|
||||
accum *= fBottom;
|
||||
|
||||
// value is either NaN or it is finite (zero).
|
||||
// accum is either NaN or it is finite (zero).
|
||||
SkASSERT(0 == accum || !(accum == accum));
|
||||
|
||||
// value==value will be true iff value is not NaN
|
||||
return value == value;
|
||||
// TODO: is it faster to say !accum or accum==accum?
|
||||
return accum == accum;
|
||||
#else
|
||||
// use bit-or for speed, since we don't care about short-circuting the
|
||||
// tests, and we expect the common case will be that we need to check all.
|
||||
@ -363,6 +375,8 @@ struct SK_API SkRect {
|
||||
#endif
|
||||
}
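A standalone sketch of the NaN-propagation idiom the new code relies on (the infinity input and the helper function are only for illustration):

    #include <limits>

    static bool is_finite_demo() {
        float accum = 0;
        accum *= std::numeric_limits<float>::infinity();   // 0 * inf produces NaN
        // NaN is the only value that compares unequal to itself, so one
        // self-comparison covers every edge multiplied into accum.
        return accum == accum;                              // false for this input
    }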
|
||||
|
||||
SkScalar x() const { return fLeft; }
|
||||
SkScalar y() const { return fTop; }
|
||||
SkScalar left() const { return fLeft; }
|
||||
SkScalar top() const { return fTop; }
|
||||
SkScalar right() const { return fRight; }
|
||||
@ -427,6 +441,13 @@ struct SK_API SkRect {
|
||||
this->set(pts, count);
|
||||
}
|
||||
|
||||
void set(const SkPoint& p0, const SkPoint& p1) {
|
||||
fLeft = SkMinScalar(p0.fX, p1.fX);
|
||||
fRight = SkMaxScalar(p0.fX, p1.fX);
|
||||
fTop = SkMinScalar(p0.fY, p1.fY);
|
||||
fBottom = SkMaxScalar(p0.fY, p1.fY);
|
||||
}
|
||||
|
||||
void setXYWH(SkScalar x, SkScalar y, SkScalar width, SkScalar height) {
|
||||
fLeft = x;
|
||||
fTop = y;
|
||||
@ -479,7 +500,7 @@ struct SK_API SkRect {
|
||||
|
||||
/** Outset the rectangle by (dx,dy). If dx is positive, then the sides are
|
||||
moved outwards, making the rectangle wider. If dx is negative, then the
|
||||
sides are moved inwards, making the rectangle narrower. The same hods
|
||||
sides are moved inwards, making the rectangle narrower. The same holds
|
||||
true for dy and the top and bottom.
|
||||
*/
|
||||
void outset(SkScalar dx, SkScalar dy) { this->inset(-dx, -dy); }
|
||||
|
@ -15,12 +15,12 @@
/** \class SkRefCnt

    SkRefCnt is the base class for objects that may be shared by multiple
    objects. When a new owner wants a reference, it calls ref(). When an owner
    wants to release its reference, it calls unref(). When the shared object's
    reference count goes to zero as the result of an unref() call, its (virtual)
    destructor is called. It is an error for the destructor to be called
    explicitly (or via the object going out of scope on the stack or calling
    delete) if getRefCnt() > 1.
    objects. When an existing owner wants to share a reference, it calls ref().
    When an owner wants to release its reference, it calls unref(). When the
    shared object's reference count goes to zero as the result of an unref()
    call, its (virtual) destructor is called. It is an error for the
    destructor to be called explicitly (or via the object going out of scope on
    the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCnt : SkNoncopyable {
public:
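A minimal usage sketch of the ownership protocol described above; MyNode is a hypothetical SkRefCnt subclass introduced only for illustration:

    class MyNode : public SkRefCnt {};

    MyNode* node = new MyNode;   // the creator owns the initial reference (count == 1)
    node->ref();                 // a second owner takes a reference (count == 2)
    node->unref();               // the second owner releases it (count == 1)
    node->unref();               // count reaches zero; the object deletes itself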
@ -28,7 +28,7 @@ public:
|
||||
*/
|
||||
SkRefCnt() : fRefCnt(1) {}
|
||||
|
||||
/** Destruct, asserting that the reference count is 1.
|
||||
/** Destruct, asserting that the reference count is 1.
|
||||
*/
|
||||
virtual ~SkRefCnt() {
|
||||
#ifdef SK_DEBUG
|
||||
@ -45,19 +45,21 @@ public:
|
||||
*/
|
||||
void ref() const {
|
||||
SkASSERT(fRefCnt > 0);
|
||||
sk_atomic_inc(&fRefCnt);
|
||||
sk_atomic_inc(&fRefCnt); // No barrier required.
|
||||
}
|
||||
|
||||
/** Decrement the reference count. If the reference count is 1 before the
|
||||
decrement, then call delete on the object. Note that if this is the
|
||||
case, then the object needs to have been allocated via new, and not on
|
||||
the stack.
|
||||
decrement, then delete the object. Note that if this is the case, then
|
||||
the object needs to have been allocated via new, and not on the stack.
|
||||
*/
|
||||
void unref() const {
|
||||
SkASSERT(fRefCnt > 0);
|
||||
// Release barrier (SL/S), if not provided below.
|
||||
if (sk_atomic_dec(&fRefCnt) == 1) {
|
||||
fRefCnt = 1; // so our destructor won't complain
|
||||
SkDELETE(this);
|
||||
// Aquire barrier (L/SL), if not provided above.
|
||||
// Prevents code in dispose from happening before the decrement.
|
||||
sk_membar_aquire__after_atomic_dec();
|
||||
internal_dispose();
|
||||
}
|
||||
}
|
||||
|
||||
@ -66,6 +68,17 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
/** Called when the ref count goes to 0.
|
||||
*/
|
||||
virtual void internal_dispose() const {
|
||||
#ifdef SK_DEBUG
|
||||
// so our destructor won't complain
|
||||
fRefCnt = 1;
|
||||
#endif
|
||||
SkDELETE(this);
|
||||
}
|
||||
friend class SkWeakRefCnt;
|
||||
|
||||
mutable int32_t fRefCnt;
|
||||
};
|
||||
|
||||
@ -128,6 +141,9 @@ public:
|
||||
return obj;
|
||||
}
|
||||
|
||||
T* operator->() { return fObj; }
|
||||
operator T*() { return fObj; }
|
||||
|
||||
private:
|
||||
T* fObj;
|
||||
};
|
||||
|
@ -45,13 +45,13 @@ public:
|
||||
* Return true if the two regions are equal. i.e. They enclose exactly
|
||||
* the same area.
|
||||
*/
|
||||
friend bool operator==(const SkRegion& a, const SkRegion& b);
|
||||
bool operator==(const SkRegion& other) const;
|
||||
|
||||
/**
|
||||
* Return true if the two regions are not equal.
|
||||
*/
|
||||
friend bool operator!=(const SkRegion& a, const SkRegion& b) {
|
||||
return !(a == b);
|
||||
bool operator!=(const SkRegion& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -378,28 +378,53 @@ private:
|
||||
};
|
||||
|
||||
enum {
|
||||
kRectRegionRuns = 6 // need to store a region of a rect [T B L R S S]
|
||||
// T
|
||||
// [B N L R S]
|
||||
// S
|
||||
kRectRegionRuns = 7
|
||||
};
|
||||
|
||||
friend class android::Region; // needed for marshalling efficiently
|
||||
void allocateRuns(int count); // allocate space for count runs
|
||||
|
||||
struct RunHead;
|
||||
|
||||
// allocate space for count runs
|
||||
void allocateRuns(int count);
|
||||
void allocateRuns(int count, int ySpanCount, int intervalCount);
|
||||
void allocateRuns(const RunHead& src);
|
||||
|
||||
SkIRect fBounds;
|
||||
RunHead* fRunHead;
|
||||
|
||||
void freeRuns();
|
||||
const RunType* getRuns(RunType tmpStorage[], int* count) const;
|
||||
bool setRuns(RunType runs[], int count);
|
||||
void freeRuns();
|
||||
|
||||
/**
|
||||
* Return the runs from this region, consing up fake runs if the region
|
||||
* is empty or a rect. In those 2 cases, we use tmpStorage to hold the
|
||||
* run data.
|
||||
*/
|
||||
const RunType* getRuns(RunType tmpStorage[], int* intervals) const;
|
||||
|
||||
// This is called with runs[] that do not yet have their interval-count
|
||||
// field set on each scanline. That is computed as part of this call
|
||||
// (inside ComputeRunBounds).
|
||||
bool setRuns(RunType runs[], int count);
|
||||
|
||||
int count_runtype_values(int* itop, int* ibot) const;
|
||||
|
||||
static void BuildRectRuns(const SkIRect& bounds,
|
||||
RunType runs[kRectRegionRuns]);
|
||||
// returns true if runs are just a rect
|
||||
static bool ComputeRunBounds(const RunType runs[], int count,
|
||||
SkIRect* bounds);
|
||||
|
||||
// If the runs define a simple rect, return true and set bounds to that
|
||||
// rect. If not, return false and ignore bounds.
|
||||
static bool RunsAreARect(const SkRegion::RunType runs[], int count,
|
||||
SkIRect* bounds);
|
||||
|
||||
/**
|
||||
* If the last arg is null, just return if the result is non-empty,
|
||||
* else store the result in the last arg.
|
||||
*/
|
||||
static bool Oper(const SkRegion&, const SkRegion&, SkRegion::Op, SkRegion*);
|
||||
|
||||
friend struct RunHead;
|
||||
friend class Iterator;
|
||||
|
@ -305,19 +305,16 @@ static inline SkScalar SkScalarSignAsScalar(SkScalar x) {
|
||||
|
||||
#define SK_ScalarNearlyZero (SK_Scalar1 / (1 << 12))
|
||||
|
||||
/* <= is slower than < for floats, so we use < for our tolerance test
|
||||
*/
|
||||
|
||||
static inline bool SkScalarNearlyZero(SkScalar x,
|
||||
SkScalar tolerance = SK_ScalarNearlyZero) {
|
||||
SkASSERT(tolerance > 0);
|
||||
return SkScalarAbs(x) < tolerance;
|
||||
SkASSERT(tolerance >= 0);
|
||||
return SkScalarAbs(x) <= tolerance;
|
||||
}
|
||||
|
||||
static inline bool SkScalarNearlyEqual(SkScalar x, SkScalar y,
|
||||
SkScalar tolerance = SK_ScalarNearlyZero) {
|
||||
SkASSERT(tolerance > 0);
|
||||
return SkScalarAbs(x-y) < tolerance;
|
||||
SkASSERT(tolerance >= 0);
|
||||
return SkScalarAbs(x-y) <= tolerance;
|
||||
}
|
||||
|
||||
/** Linearly interpolate between A and B, based on t.
|
||||
|
@ -16,6 +16,8 @@
|
||||
#include "SkPath.h"
|
||||
#include "SkPoint.h"
|
||||
|
||||
//#define SK_USE_COLOR_LUMINANCE
|
||||
|
||||
class SkDescriptor;
|
||||
class SkMaskFilter;
|
||||
class SkPathEffect;
|
||||
@ -175,16 +177,27 @@ public:
|
||||
kLCD_Vertical_Flag = 0x0200, // else Horizontal
|
||||
kLCD_BGROrder_Flag = 0x0400, // else RGB order
|
||||
|
||||
// Generate A8 from LCD source (for GDI), only meaningful if fMaskFormat is kA8
|
||||
// Perhaps we can store this (instead) in fMaskFormat, in high bit?
|
||||
kGenA8FromLCD_Flag = 0x0800,
|
||||
|
||||
#ifdef SK_USE_COLOR_LUMINANCE
|
||||
kLuminance_Bits = 3,
|
||||
#else
|
||||
// luminance : 0 for black text, kLuminance_Max for white text
|
||||
kLuminance_Shift = 11, // to shift into the other flags above
|
||||
kLuminance_Bits = 3 // ensure Flags doesn't exceed 16bits
|
||||
kLuminance_Shift = 13, // shift to land in the high 3-bits of Flags
|
||||
kLuminance_Bits = 3, // ensure Flags doesn't exceed 16bits
|
||||
#endif
|
||||
};
|
||||
|
||||
// computed values
|
||||
enum {
|
||||
kHinting_Mask = kHintingBit1_Flag | kHintingBit2_Flag,
|
||||
#ifdef SK_USE_COLOR_LUMINANCE
|
||||
#else
|
||||
kLuminance_Max = (1 << kLuminance_Bits) - 1,
|
||||
kLuminance_Mask = kLuminance_Max << kLuminance_Shift
|
||||
kLuminance_Mask = kLuminance_Max << kLuminance_Shift,
|
||||
#endif
|
||||
};
|
||||
|
||||
struct Rec {
|
||||
@ -193,6 +206,9 @@ public:
|
||||
SkScalar fTextSize, fPreScaleX, fPreSkewX;
|
||||
SkScalar fPost2x2[2][2];
|
||||
SkScalar fFrameWidth, fMiterLimit;
|
||||
#ifdef SK_USE_COLOR_LUMINANCE
|
||||
uint32_t fLumBits;
|
||||
#endif
|
||||
uint8_t fMaskFormat;
|
||||
uint8_t fStrokeJoin;
|
||||
uint16_t fFlags;
|
||||
@ -213,7 +229,20 @@ public:
|
||||
void setHinting(SkPaint::Hinting hinting) {
|
||||
fFlags = (fFlags & ~kHinting_Mask) | (hinting << kHinting_Shift);
|
||||
}
|
||||
|
||||
|
||||
SkMask::Format getFormat() const {
|
||||
return static_cast<SkMask::Format>(fMaskFormat);
|
||||
}
|
||||
|
||||
#ifdef SK_USE_COLOR_LUMINANCE
|
||||
SkColor getLuminanceColor() const {
|
||||
return fLumBits;
|
||||
}
|
||||
|
||||
void setLuminanceColor(SkColor c) {
|
||||
fLumBits = c;
|
||||
}
|
||||
#else
|
||||
unsigned getLuminanceBits() const {
|
||||
return (fFlags & kLuminance_Mask) >> kLuminance_Shift;
|
||||
}
|
||||
@ -230,10 +259,7 @@ public:
|
||||
lum |= (lum << kLuminance_Bits*2);
|
||||
return lum >> (4*kLuminance_Bits - 8);
|
||||
}
|
||||
|
||||
SkMask::Format getFormat() const {
|
||||
return static_cast<SkMask::Format>(fMaskFormat);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
SkScalerContext(const SkDescriptor* desc);
|
||||
@ -272,7 +298,13 @@ public:
|
||||
void getFontMetrics(SkPaint::FontMetrics* mX,
|
||||
SkPaint::FontMetrics* mY);
|
||||
|
||||
#ifdef SK_BUILD_FOR_ANDROID
|
||||
unsigned getBaseGlyphCount(SkUnichar charCode);
|
||||
#endif
|
||||
|
||||
static inline void MakeRec(const SkPaint&, const SkMatrix*, Rec* rec);
|
||||
static inline void PostMakeRec(Rec*);
|
||||
|
||||
static SkScalerContext* Create(const SkDescriptor*);
|
||||
|
||||
protected:
|
||||
@ -296,7 +328,6 @@ private:
|
||||
SkPathEffect* fPathEffect;
|
||||
SkMaskFilter* fMaskFilter;
|
||||
SkRasterizer* fRasterizer;
|
||||
SkScalar fDevFrameWidth;
|
||||
|
||||
// if this is set, we draw the image from a path, rather than
|
||||
// calling generateImage.
|
||||
|
@ -20,10 +20,13 @@ class SkPath;

/** \class SkShader
 *
 *  SkShader is the based class for objects that return horizontal spans of
 *  colors during drawing. A subclass of SkShader is installed in a SkPaint
 *  calling paint.setShader(shader). After that any object (other than a bitmap)
 *  that is drawn with that paint will get its color(s) from the shader.
 *  Shaders specify the source color(s) for what is being drawn. If a paint
 *  has no shader, then the paint's color is used. If the paint has a
 *  shader, then the shader's color(s) are use instead, but they are
 *  modulated by the paint's alpha. This makes it easy to create a shader
 *  once (e.g. bitmap tiling or gradient) and then change its transparency
 *  w/o having to modify the original shader... only the paint's alpha needs
 *  to be modified.
 */
class SK_API SkShader : public SkFlattenable {
public:
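A minimal sketch of the shader/paint relationship described above; the bitmap 'bm', the canvas, and the rectangle are assumed to exist already:

    SkShader* shader = SkShader::CreateBitmapShader(bm, SkShader::kRepeat_TileMode,
                                                    SkShader::kRepeat_TileMode);
    SkPaint paint;
    paint.setShader(shader);
    shader->unref();               // the paint holds its own reference now
    paint.setAlpha(0x80);          // the shader's colors are modulated by the paint's alpha
    canvas->drawRect(rect, paint); // rect is filled from the tiled bitmap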
@ -49,9 +52,23 @@ public:
|
||||
void resetLocalMatrix();
|
||||
|
||||
enum TileMode {
|
||||
kClamp_TileMode, //!< replicate the edge color if the shader draws outside of its original bounds
|
||||
kRepeat_TileMode, //!< repeat the shader's image horizontally and vertically
|
||||
kMirror_TileMode, //!< repeat the shader's image horizontally and vertically, alternating mirror images so that adjacent images always seam
|
||||
/** replicate the edge color if the shader draws outside of its
|
||||
* original bounds
|
||||
*/
|
||||
kClamp_TileMode,
|
||||
|
||||
/** repeat the shader's image horizontally and vertically */
|
||||
kRepeat_TileMode,
|
||||
|
||||
/** repeat the shader's image horizontally and vertically, alternating
|
||||
* mirror images so that adjacent images always seam
|
||||
*/
|
||||
kMirror_TileMode,
|
||||
|
||||
#if 0
|
||||
/** only draw within the original domain, return 0 everywhere else */
|
||||
kDecal_TileMode,
|
||||
#endif
|
||||
|
||||
kTileModeCount
|
||||
};
|
||||
@ -271,15 +288,19 @@ public:
|
||||
// Factory methods for stock shaders
|
||||
|
||||
/** Call this to create a new shader that will draw with the specified bitmap.
|
||||
@param src The bitmap to use inside the shader
|
||||
@param tmx The tiling mode to use when sampling the bitmap in the x-direction.
|
||||
@param tmy The tiling mode to use when sampling the bitmap in the y-direction.
|
||||
@return Returns a new shader object. Note: this function never returns null.
|
||||
*
|
||||
* If the bitmap cannot be used (e.g. has no pixels, or its dimensions
|
||||
* exceed implementation limits (currently at 64K - 1)) then SkEmptyShader
|
||||
* may be returned.
|
||||
*
|
||||
* @param src The bitmap to use inside the shader
|
||||
* @param tmx The tiling mode to use when sampling the bitmap in the x-direction.
|
||||
* @param tmy The tiling mode to use when sampling the bitmap in the y-direction.
|
||||
* @return Returns a new shader object. Note: this function never returns null.
|
||||
*/
|
||||
static SkShader* CreateBitmapShader(const SkBitmap& src,
|
||||
TileMode tmx, TileMode tmy);
|
||||
|
||||
virtual void flatten(SkFlattenableWriteBuffer& ) SK_OVERRIDE;
|
||||
protected:
|
||||
enum MatrixClass {
|
||||
kLinear_MatrixClass, // no perspective
|
||||
@ -295,6 +316,7 @@ protected:
|
||||
MatrixClass getInverseClass() const { return (MatrixClass)fTotalInverseClass; }
|
||||
|
||||
SkShader(SkFlattenableReadBuffer& );
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
private:
|
||||
SkMatrix* fLocalMatrix;
|
||||
SkMatrix fTotalInverse;
|
||||
|
@ -31,14 +31,7 @@ public:
|
||||
*/
|
||||
void drawMatrix(SkCanvas*, const SkMatrix&);
|
||||
|
||||
// overrides
|
||||
virtual Factory getFactory();
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
// public for Registrar
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkShape)
|
||||
|
||||
protected:
|
||||
virtual void onDraw(SkCanvas*);
|
||||
|
@ -272,7 +272,8 @@ class SkMemoryWStream : public SkWStream {
|
||||
public:
|
||||
SkMemoryWStream(void* buffer, size_t size);
|
||||
virtual bool write(const void* buffer, size_t size) SK_OVERRIDE;
|
||||
|
||||
size_t bytesWritten() const { return fBytesWritten; }
|
||||
|
||||
private:
|
||||
char* fBuffer;
|
||||
size_t fMaxLength;
|
||||
|
@ -15,9 +15,18 @@
|
||||
/* Some helper functions for C strings
|
||||
*/
|
||||
|
||||
bool SkStrStartsWith(const char string[], const char prefix[]);
|
||||
static bool SkStrStartsWith(const char string[], const char prefix[]) {
|
||||
SkASSERT(string);
|
||||
SkASSERT(prefix);
|
||||
return !strncmp(string, prefix, strlen(prefix));
|
||||
}
|
||||
bool SkStrEndsWith(const char string[], const char suffix[]);
|
||||
int SkStrStartsWithOneOf(const char string[], const char prefixes[]);
|
||||
static bool SkStrContains(const char string[], const char substring[]) {
|
||||
SkASSERT(string);
|
||||
SkASSERT(substring);
|
||||
return (NULL != strstr(string, substring));
|
||||
}
|
||||
|
||||
#define SkStrAppendS32_MaxSize 11
|
||||
char* SkStrAppendS32(char buffer[], int32_t);
|
||||
@ -81,6 +90,9 @@ public:
|
||||
bool endsWith(const char suffix[]) const {
|
||||
return SkStrEndsWith(fRec->data(), suffix);
|
||||
}
|
||||
bool contains(const char substring[]) const {
|
||||
return SkStrContains(fRec->data(), substring);
|
||||
}
|
||||
|
||||
friend bool operator==(const SkString& a, const SkString& b) {
|
||||
return a.equals(b);
|
||||
|
@ -154,7 +154,7 @@ public:
|
||||
return this->append(1, NULL);
|
||||
}
|
||||
T* append(size_t count, const T* src = NULL) {
|
||||
unsigned oldCount = fCount;
|
||||
size_t oldCount = fCount;
|
||||
if (count) {
|
||||
SkASSERT(src == NULL || fArray == NULL ||
|
||||
src + count <= fArray || fArray + oldCount <= src);
|
||||
@ -179,7 +179,7 @@ public:
|
||||
T* insert(size_t index, size_t count, const T* src = NULL) {
|
||||
SkASSERT(count);
|
||||
SkASSERT(index <= fCount);
|
||||
int oldCount = fCount;
|
||||
size_t oldCount = fCount;
|
||||
this->growBy(count);
|
||||
T* dst = fArray + index;
|
||||
memmove(dst + count, dst, sizeof(T) * (oldCount - index));
|
||||
@ -197,7 +197,7 @@ public:
|
||||
|
||||
void removeShuffle(size_t index) {
|
||||
SkASSERT(index < fCount);
|
||||
unsigned newCount = fCount - 1;
|
||||
size_t newCount = fCount - 1;
|
||||
fCount = newCount;
|
||||
if (index != newCount) {
|
||||
memcpy(fArray + index, fArray + newCount, sizeof(T));
|
||||
|
@ -12,6 +12,24 @@

#include "SkTypes.h"

/**
 *  All of the SkTSearch variants want to return the index (0...N-1) of the
 *  found element, or the bit-not of where to insert the element.
 *
 *  At a simple level, if the return value is negative, it was not found.
 *
 *  For clients that want to insert the new element if it was not found, use
 *  the following logic:
 *
 *      int index = SkTSearch(...);
 *      if (index >= 0) {
 *          // found at index
 *      } else {
 *          index = ~index; // now we are positive
 *          // insert at index
 *      }
 */
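A concrete sketch of the lookup-or-insert pattern shown above, using a plain sorted int buffer (the buffer, its count, and the use of memmove from <string.h> are illustrative assumptions):

    int values[8] = { 2, 5, 9 };
    int count = 3;
    int key = 7;
    int index = SkTSearch<int>(values, count, key, sizeof(int));
    if (index < 0) {
        index = ~index;                               // bit-not gives the insertion point
        memmove(&values[index + 1], &values[index],
                (count - index) * sizeof(int));       // shift the tail up one slot
        values[index] = key;
        count += 1;                                   // values is now { 2, 5, 7, 9 }
    }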
|
||||
|
||||
template <typename T>
|
||||
int SkTSearch(const T* base, int count, const T& target, size_t elemSize)
|
||||
{
|
||||
@ -47,7 +65,7 @@ int SkTSearch(const T* base, int count, const T& target, size_t elemSize)
|
||||
|
||||
template <typename T>
|
||||
int SkTSearch(const T* base, int count, const T& target, size_t elemSize,
|
||||
int (*compare)(const T&, const T&))
|
||||
int (*compare)(const T*, const T*))
|
||||
{
|
||||
SkASSERT(count >= 0);
|
||||
if (count <= 0) {
|
||||
@ -63,14 +81,14 @@ int SkTSearch(const T* base, int count, const T& target, size_t elemSize,
|
||||
int mid = (hi + lo) >> 1;
|
||||
const T* elem = (const T*)((const char*)base + mid * elemSize);
|
||||
|
||||
if ((*compare)(*elem, target) < 0)
|
||||
if ((*compare)(elem, &target) < 0)
|
||||
lo = mid + 1;
|
||||
else
|
||||
hi = mid;
|
||||
}
|
||||
|
||||
const T* elem = (const T*)((const char*)base + hi * elemSize);
|
||||
int pred = (*compare)(*elem, target);
|
||||
int pred = (*compare)(elem, &target);
|
||||
if (pred != 0) {
|
||||
if (pred < 0)
|
||||
hi += 1;
|
||||
@ -149,10 +167,8 @@ private:
|
||||
char fStorage[STORAGE+1];
|
||||
};
|
||||
|
||||
extern "C" {
|
||||
typedef int (*SkQSortCompareProc)(const void*, const void*);
|
||||
void SkQSort(void* base, size_t count, size_t elemSize, SkQSortCompareProc);
|
||||
}
|
||||
// Helper when calling qsort with a compare proc that has typed its arguments
|
||||
#define SkCastForQSort(compare) reinterpret_cast<int (*)(const void*, const void*)>(compare)
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
|
||||
int32_t sk_atomic_inc(int32_t*);
|
||||
int32_t sk_atomic_dec(int32_t*);
|
||||
int32_t sk_atomic_conditional_inc(int32_t*);
|
||||
|
||||
class SkMutex {
|
||||
public:
|
||||
@ -31,31 +32,36 @@ public:
|
||||
|
||||
class SkAutoMutexAcquire : SkNoncopyable {
|
||||
public:
|
||||
explicit SkAutoMutexAcquire(SkMutex& mutex) : fMutex(&mutex)
|
||||
{
|
||||
explicit SkAutoMutexAcquire(SkBaseMutex& mutex) : fMutex(&mutex) {
|
||||
SkASSERT(fMutex != NULL);
|
||||
mutex.acquire();
|
||||
}
|
||||
/** If the mutex has not been release, release it now.
|
||||
*/
|
||||
~SkAutoMutexAcquire()
|
||||
{
|
||||
if (fMutex)
|
||||
fMutex->release();
|
||||
|
||||
SkAutoMutexAcquire(SkBaseMutex* mutex) : fMutex(mutex) {
|
||||
if (mutex) {
|
||||
mutex->acquire();
|
||||
}
|
||||
}
|
||||
|
||||
/** If the mutex has not been release, release it now.
|
||||
*/
|
||||
void release()
|
||||
{
|
||||
if (fMutex)
|
||||
{
|
||||
~SkAutoMutexAcquire() {
|
||||
if (fMutex) {
|
||||
fMutex->release();
|
||||
}
|
||||
}
|
||||
|
||||
/** If the mutex has not been release, release it now.
|
||||
*/
|
||||
void release() {
|
||||
if (fMutex) {
|
||||
fMutex->release();
|
||||
fMutex = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
SkMutex* fMutex;
|
||||
SkBaseMutex* fMutex;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -10,42 +10,154 @@
|
||||
#ifndef SkThread_platform_DEFINED
|
||||
#define SkThread_platform_DEFINED
|
||||
|
||||
#if defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_ANDROID_NDK)
|
||||
#if defined(SK_BUILD_FOR_ANDROID)
|
||||
|
||||
#include <utils/threads.h>
|
||||
#if defined(SK_BUILD_FOR_ANDROID_NDK)
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/* Just use the GCC atomic intrinsics. They're supported by the NDK toolchain,
|
||||
* have reasonable performance, and provide full memory barriers
|
||||
*/
|
||||
static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
|
||||
return __sync_fetch_and_add(addr, 1);
|
||||
}
|
||||
|
||||
static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
|
||||
return __sync_fetch_and_add(addr, -1);
|
||||
}
|
||||
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() { }
|
||||
|
||||
static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
|
||||
int32_t value = *addr;
|
||||
|
||||
while (true) {
|
||||
if (value == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
|
||||
|
||||
if (before == value) {
|
||||
return value;
|
||||
} else {
|
||||
value = before;
|
||||
}
|
||||
}
|
||||
}
|
||||
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() { }
|
||||
|
||||
#else // !SK_BUILD_FOR_ANDROID_NDK
|
||||
|
||||
/* The platform atomics operations are slightly more efficient than the
|
||||
* GCC built-ins, so use them.
|
||||
*/
|
||||
#include <utils/Atomic.h>
|
||||
|
||||
#define sk_atomic_inc(addr) android_atomic_inc(addr)
|
||||
#define sk_atomic_dec(addr) android_atomic_dec(addr)
|
||||
void sk_membar_aquire__after_atomic_dec() {
|
||||
//HACK: Android is actually using full memory barriers.
|
||||
// Should this change, uncomment below.
|
||||
//int dummy;
|
||||
//android_atomic_aquire_store(0, &dummy);
|
||||
}
|
||||
int32_t sk_atomic_conditional_inc(int32_t* addr) {
|
||||
while (true) {
|
||||
int32_t value = *addr;
|
||||
if (value == 0) {
|
||||
return 0;
|
||||
}
|
||||
if (0 == android_atomic_release_cas(value, value + 1, addr)) {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
}
|
||||
void sk_membar_aquire__after_atomic_conditional_inc() {
|
||||
//HACK: Android is actually using full memory barriers.
|
||||
// Should this change, uncomment below.
|
||||
//int dummy;
|
||||
//android_atomic_aquire_store(0, &dummy);
|
||||
}
|
||||
|
||||
class SkMutex : android::Mutex {
|
||||
public:
|
||||
// if isGlobal is true, then ignore any errors in the platform-specific
|
||||
// destructor
|
||||
SkMutex(bool isGlobal = true) {}
|
||||
~SkMutex() {}
|
||||
#endif // !SK_BUILD_FOR_ANDROID_NDK
|
||||
|
||||
void acquire() { this->lock(); }
|
||||
void release() { this->unlock(); }
|
||||
};
|
||||
#else // !SK_BUILD_FOR_ANDROID
|
||||
|
||||
#else
|
||||
|
||||
/** Implemented by the porting layer, this function adds 1 to the int specified
|
||||
by the address (in a thread-safe manner), and returns the previous value.
|
||||
/** Implemented by the porting layer, this function adds one to the int
|
||||
specified by the address (in a thread-safe manner), and returns the
|
||||
previous value.
|
||||
No additional memory barrier is required.
|
||||
This must act as a compiler barrier.
|
||||
*/
|
||||
SK_API int32_t sk_atomic_inc(int32_t* addr);
|
||||
/** Implemented by the porting layer, this function subtracts 1 to the int
|
||||
specified by the address (in a thread-safe manner), and returns the previous
|
||||
value.
|
||||
|
||||
/** Implemented by the porting layer, this function subtracts one from the int
|
||||
specified by the address (in a thread-safe manner), and returns the
|
||||
previous value.
|
||||
Expected to act as a release (SL/S) memory barrier and a compiler barrier.
|
||||
*/
|
||||
SK_API int32_t sk_atomic_dec(int32_t* addr);
|
||||
/** If sk_atomic_dec does not act as an aquire (L/SL) barrier, this is expected
|
||||
to act as an aquire (L/SL) memory barrier and as a compiler barrier.
|
||||
*/
|
||||
SK_API void sk_membar_aquire__after_atomic_dec();
|
||||
|
||||
class SkMutex {
|
||||
/** Implemented by the porting layer, this function adds one to the int
|
||||
specified by the address iff the int specified by the address is not zero
|
||||
(in a thread-safe manner), and returns the previous value.
|
||||
No additional memory barrier is required.
|
||||
This must act as a compiler barrier.
|
||||
*/
|
||||
SK_API int32_t sk_atomic_conditional_inc(int32_t*);
|
||||
/** If sk_atomic_conditional_inc does not act as an aquire (L/SL) barrier, this
|
||||
is expected to act as an aquire (L/SL) memory barrier and as a compiler
|
||||
barrier.
|
||||
*/
|
||||
SK_API void sk_membar_aquire__after_atomic_conditional_inc();
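One way a port could satisfy the contract above is with the GCC __sync builtins, mirroring the Android NDK path earlier in this patch; this is a sketch only, not the actual porting layer:

    int32_t sk_atomic_inc(int32_t* addr) { return __sync_fetch_and_add(addr, 1); }
    int32_t sk_atomic_dec(int32_t* addr) { return __sync_fetch_and_add(addr, -1); }
    void sk_membar_aquire__after_atomic_dec() { /* __sync builtins already act as full barriers */ }
    // sk_atomic_conditional_inc can loop on __sync_val_compare_and_swap, as the
    // SK_BUILD_FOR_ANDROID_NDK block above does.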
|
||||
|
||||
#endif // !SK_BUILD_FOR_ANDROID
|
||||
|
||||
#ifdef SK_USE_POSIX_THREADS

#include <pthread.h>

// A SkBaseMutex is a POD structure that can be directly initialized
// at declaration time with SK_DECLARE_STATIC/GLOBAL_MUTEX. This avoids the
// generation of a static initializer in the final machine code (and
// a corresponding static finalizer).
//
struct SkBaseMutex {
    void acquire() { pthread_mutex_lock(&fMutex); }
    void release() { pthread_mutex_unlock(&fMutex); }
    pthread_mutex_t fMutex;
};

// Using POD-style initialization prevents the generation of a static initializer
// and keeps the acquire() implementation small and fast.
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }

// Special case used when the static mutex must be available globally.
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }

#define SK_DECLARE_MUTEX_ARRAY(name, count) SkBaseMutex name[count] = { PTHREAD_MUTEX_INITIALIZER }
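A short usage sketch of the declarations above; the mutex name and the guarded function are illustrative:

    SK_DECLARE_STATIC_MUTEX(gCacheMutex);      // POD init, so no static initializer is emitted

    static void touch_shared_cache() {
        SkAutoMutexAcquire ac(gCacheMutex);    // acquires here
        // ... mutate the shared state ...
    }                                          // releases when ac goes out of scope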
|
||||
|
||||
// A normal mutex that requires to be initialized through normal C++ construction,
|
||||
// i.e. when it's a member of another class, or allocated on the heap.
|
||||
class SkMutex : public SkBaseMutex, SkNoncopyable {
|
||||
public:
|
||||
// if isGlobal is true, then ignore any errors in the platform-specific
|
||||
// destructor
|
||||
SkMutex(bool isGlobal = true);
|
||||
SkMutex();
|
||||
~SkMutex();
|
||||
};
|
||||
|
||||
#else // !SK_USE_POSIX_THREADS
|
||||
|
||||
// In the generic case, SkBaseMutex and SkMutex are the same thing, and we
|
||||
// can't easily get rid of static initializers.
|
||||
//
|
||||
class SkMutex : SkNoncopyable {
|
||||
public:
|
||||
SkMutex();
|
||||
~SkMutex();
|
||||
|
||||
void acquire();
|
||||
@ -59,6 +171,13 @@ private:
|
||||
uint32_t fStorage[kStorageIntCount];
|
||||
};
|
||||
|
||||
#endif
|
||||
typedef SkMutex SkBaseMutex;
|
||||
|
||||
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name
|
||||
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name
|
||||
#define SK_DECLARE_MUTEX_ARRAY(name, count) SkBaseMutex name[count]
|
||||
|
||||
#endif // !SK_USE_POSIX_THREADS
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -11,13 +11,14 @@
|
||||
#define SkTypeface_DEFINED
|
||||
|
||||
#include "SkAdvancedTypefaceMetrics.h"
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkWeakRefCnt.h"
|
||||
|
||||
class SkStream;
|
||||
class SkAdvancedTypefaceMetrics;
|
||||
class SkWStream;
|
||||
|
||||
typedef uint32_t SkFontID;
|
||||
typedef uint32_t SkFontTableTag;
|
||||
|
||||
/** \class SkTypeface
|
||||
|
||||
@ -28,7 +29,7 @@ typedef uint32_t SkFontID;
|
||||
|
||||
Typeface objects are immutable, and so they can be shared between threads.
|
||||
*/
|
||||
class SK_API SkTypeface : public SkRefCnt {
|
||||
class SK_API SkTypeface : public SkWeakRefCnt {
|
||||
public:
|
||||
/** Style specifies the intrinsic style attributes of a given typeface
|
||||
*/
|
||||
@ -61,7 +62,7 @@ public:
|
||||
data. Will never return 0.
|
||||
*/
|
||||
SkFontID uniqueID() const { return fUniqueID; }
|
||||
|
||||
|
||||
/** Return the uniqueID for the specified typeface. If the face is null,
|
||||
resolve it to the default font and return its uniqueID. Will never
|
||||
return 0.
|
||||
@ -84,18 +85,6 @@ public:
|
||||
*/
|
||||
static SkTypeface* CreateFromName(const char familyName[], Style style);
|
||||
|
||||
/** Return a new reference to the typeface that covers a set of Unicode
|
||||
code points with the specified Style. Use this call if you want to
|
||||
pick any font that covers a given string of text.
|
||||
|
||||
@param data UTF-16 characters
|
||||
@param bytelength length of data, in bytes
|
||||
@return reference to the closest-matching typeface. Call must call
|
||||
unref() when they are done.
|
||||
*/
|
||||
static SkTypeface* CreateForChars(const void* data, size_t bytelength,
|
||||
Style s);
|
||||
|
||||
/** Return a new reference to the typeface that most closely matches the
|
||||
requested typeface and specified Style. Use this call if you want to
|
||||
pick a new style from the same family of the existing typeface.
|
||||
@ -146,6 +135,46 @@ public:
|
||||
const uint32_t* glyphIDs = NULL,
|
||||
uint32_t glyphIDsCount = 0) const;
|
||||
|
||||
// Table getters -- may fail if the underlying font format is not organized
|
||||
// as 4-byte tables.
|
||||
|
||||
/** Return the number of tables in the font. */
|
||||
int countTables() const;
|
||||
|
||||
/** Copy into tags[] (allocated by the caller) the list of table tags in
|
||||
* the font, and return the number. This will be the same as CountTables()
|
||||
* or 0 if an error occurred. If tags == NULL, this only returns the count
|
||||
* (the same as calling countTables()).
|
||||
*/
|
||||
int getTableTags(SkFontTableTag tags[]) const;
|
||||
|
||||
/** Given a table tag, return the size of its contents, or 0 if not present
|
||||
*/
|
||||
size_t getTableSize(SkFontTableTag) const;
|
||||
|
||||
/** Copy the contents of a table into data (allocated by the caller). Note
|
||||
* that the contents of the table will be in their native endian order
|
||||
* (which for most truetype tables is big endian). If the table tag is
|
||||
* not found, or there is an error copying the data, then 0 is returned.
|
||||
* If this happens, it is possible that some or all of the memory pointed
|
||||
* to by data may have been written to, even though an error has occurred.
|
||||
*
|
||||
* @param fontID the font to copy the table from
|
||||
* @param tag The table tag whose contents are to be copied
|
||||
* @param offset The offset in bytes into the table's contents where the
|
||||
* copy should start from.
|
||||
* @param length The number of bytes, starting at offset, of table data
|
||||
* to copy.
|
||||
* @param data storage address where the table contents are copied to
|
||||
* @return the number of bytes actually copied into data. If offset+length
|
||||
* exceeds the table's size, then only the bytes up to the table's
|
||||
* size are actually copied, and this is the value returned. If
|
||||
* offset > the table's size, or tag is not a valid table,
|
||||
* then 0 is returned.
|
||||
*/
|
||||
size_t getTableData(SkFontTableTag tag, size_t offset, size_t length,
|
||||
void* data) const;
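A minimal sketch of the table getters declared above; the family name and the plain new[]/delete[] buffers are assumptions for illustration:

    SkTypeface* face = SkTypeface::CreateFromName("Arial", SkTypeface::kNormal);
    int count = face->countTables();
    SkFontTableTag* tags = new SkFontTableTag[count];
    face->getTableTags(tags);
    for (int i = 0; i < count; ++i) {
        size_t size = face->getTableSize(tags[i]);
        char* storage = new char[size];
        size_t copied = face->getTableData(tags[i], 0, size, storage);
        SkASSERT(copied == size);              // a short copy signals a bad offset/length
        delete[] storage;
    }
    delete[] tags;
    face->unref();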
|
||||
|
||||
protected:
|
||||
/** uniqueID must be unique (please!) and non-zero
|
||||
*/
|
||||
@ -157,7 +186,7 @@ private:
|
||||
Style fStyle;
|
||||
bool fIsFixedWidth;
|
||||
|
||||
typedef SkRefCnt INHERITED;
|
||||
typedef SkWeakRefCnt INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -204,7 +204,7 @@ static inline bool SkIsU16(long x) {
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
#ifndef SK_OFFSETOF
|
||||
#define SK_OFFSETOF(type, field) ((char*)&(((type*)1)->field) - (char*)1)
|
||||
#define SK_OFFSETOF(type, field) (size_t)((char*)&(((type*)1)->field) - (char*)1)
|
||||
#endif
|
||||
|
||||
/** Returns the number of entries in an array (not a pointer)
|
||||
@ -438,7 +438,7 @@ public:
|
||||
* current block is dynamically allocated, just return the old
|
||||
* block.
|
||||
*/
|
||||
kReuse_OnShrink
|
||||
kReuse_OnShrink,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -32,13 +32,6 @@ void sk_memset32_portable(uint32_t dst[], uint32_t value, int count);
|
||||
typedef void (*SkMemset32Proc)(uint32_t dst[], uint32_t value, int count);
|
||||
SkMemset32Proc SkMemset32GetPlatformProc();
|
||||
|
||||
#if defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_ANDROID_NDK)
|
||||
#include "cutils/memory.h"
|
||||
|
||||
#define sk_memset16(dst, value, count) android_memset16(dst, value, (count) << 1)
|
||||
#define sk_memset32(dst, value, count) android_memset32(dst, value, (count) << 2)
|
||||
#endif
|
||||
|
||||
#ifndef sk_memset16
|
||||
extern SkMemset16Proc sk_memset16;
|
||||
#endif
|
||||
|
gfx/skia/include/core/SkWeakRefCnt.h (new file, 155 lines)
@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Copyright 2012 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef SkWeakRefCnt_DEFINED
|
||||
#define SkWeakRefCnt_DEFINED
|
||||
|
||||
#include "SkRefCnt.h"
|
||||
#include "SkThread.h"
|
||||
|
||||
/** \class SkWeakRefCnt
|
||||
|
||||
SkWeakRefCnt is the base class for objects that may be shared by multiple
|
||||
objects. When an existing strong owner wants to share a reference, it calls
|
||||
ref(). When a strong owner wants to release its reference, it calls
|
||||
unref(). When the shared object's strong reference count goes to zero as
|
||||
the result of an unref() call, its (virtual) weak_dispose method is called.
|
||||
It is an error for the destructor to be called explicitly (or via the
|
||||
object going out of scope on the stack or calling delete) if
|
||||
getRefCnt() > 1.
|
||||
|
||||
In addition to strong ownership, an owner may instead obtain a weak
|
||||
reference by calling weak_ref(). A call to weak_ref() must be balanced by a
|
||||
call to weak_unref(). To obtain a strong reference from a weak reference,
|
||||
call try_ref(). If try_ref() returns true, the owner's pointer is now also
|
||||
a strong reference on which unref() must be called. Note that this does not
|
||||
affect the original weak reference, weak_unref() must still be called. When
|
||||
the weak reference count goes to zero, the object is deleted. While the
|
||||
weak reference count is positive and the strong reference count is zero the
|
||||
object still exists, but will be in the disposed state. It is up to the
|
||||
object to define what this means.
|
||||
|
||||
Note that a strong reference implicitly implies a weak reference. As a
|
||||
result, it is allowable for the owner of a strong ref to call try_ref().
|
||||
This will have the same effect as calling ref(), but may be more expensive.
|
||||
|
||||
Example:
|
||||
|
||||
SkWeakRefCnt myRef = strongRef.weak_ref();
|
||||
... // strongRef.unref() may or may not be called
|
||||
if (myRef.try_ref()) {
|
||||
... // use myRef
|
||||
myRef.unref();
|
||||
} else {
|
||||
// myRef is in the disposed state
|
||||
}
|
||||
myRef.weak_unref();
|
||||
*/
|
||||
class SK_API SkWeakRefCnt : public SkRefCnt {
|
||||
public:
|
||||
/** Default construct, initializing the reference counts to 1.
|
||||
The strong references collectively hold one weak reference. When the
|
||||
strong reference count goes to zero, the collectively held weak
|
||||
reference is released.
|
||||
*/
|
||||
SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
|
||||
|
||||
/** Destruct, asserting that the weak reference count is 1.
|
||||
*/
|
||||
virtual ~SkWeakRefCnt() {
|
||||
#ifdef SK_DEBUG
|
||||
SkASSERT(fWeakCnt == 1);
|
||||
fWeakCnt = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/** Return the weak reference count.
|
||||
*/
|
||||
int32_t getWeakCnt() const { return fWeakCnt; }
|
||||
|
||||
void validate() const {
|
||||
SkRefCnt::validate();
|
||||
SkASSERT(fWeakCnt > 0);
|
||||
}
|
||||
|
||||
/** Creates a strong reference from a weak reference, if possible. The
|
||||
caller must already be an owner. If try_ref() returns true the owner
|
||||
is in possession of an additional strong reference. Both the original
|
||||
reference and new reference must be properly unreferenced. If try_ref()
|
||||
returns false, no strong reference could be created and the owner's
|
||||
reference is in the same state as before the call.
|
||||
*/
|
||||
bool SK_WARN_UNUSED_RESULT try_ref() const {
|
||||
if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
|
||||
// Aquire barrier (L/SL), if not provided above.
|
||||
// Prevents subsequent code from happening before the increment.
|
||||
sk_membar_aquire__after_atomic_conditional_inc();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Increment the weak reference count. Must be balanced by a call to
|
||||
weak_unref().
|
||||
*/
|
||||
void weak_ref() const {
|
||||
SkASSERT(fRefCnt > 0);
|
||||
SkASSERT(fWeakCnt > 0);
|
||||
sk_atomic_inc(&fWeakCnt); // No barrier required.
|
||||
}
|
||||
|
||||
/** Decrement the weak reference count. If the weak reference count is 1
|
||||
before the decrement, then call delete on the object. Note that if this
|
||||
is the case, then the object needs to have been allocated via new, and
|
||||
not on the stack.
|
||||
*/
|
||||
void weak_unref() const {
|
||||
SkASSERT(fWeakCnt > 0);
|
||||
// Release barrier (SL/S), if not provided below.
|
||||
if (sk_atomic_dec(&fWeakCnt) == 1) {
|
||||
// Aquire barrier (L/SL), if not provided above.
|
||||
// Prevents code in destructor from happening before the decrement.
|
||||
sk_membar_aquire__after_atomic_dec();
|
||||
#ifdef SK_DEBUG
|
||||
// so our destructor won't complain
|
||||
fWeakCnt = 1;
|
||||
#endif
|
||||
SkRefCnt::internal_dispose();
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns true if there are no strong references to the object. When this
|
||||
is the case all future calls to try_ref() will return false.
|
||||
*/
|
||||
bool weak_expired() const {
|
||||
return fRefCnt == 0;
|
||||
}
|
||||
|
||||
protected:
|
||||
/** Called when the strong reference count goes to zero. This allows the
|
||||
object to free any resources it may be holding. Weak references may
|
||||
still exist and their level of allowed access to the object is defined
|
||||
by the object's class.
|
||||
*/
|
||||
virtual void weak_dispose() const {
|
||||
}
|
||||
|
||||
private:
|
||||
/** Called when the strong reference count goes to zero. Calls weak_dispose
|
||||
on the object and releases the implicit weak reference held
|
||||
collectively by the strong references.
|
||||
*/
|
||||
virtual void internal_dispose() const SK_OVERRIDE {
|
||||
weak_dispose();
|
||||
weak_unref();
|
||||
}
|
||||
|
||||
/* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
|
||||
mutable int32_t fWeakCnt;
|
||||
};
|
||||
|
||||
#endif
|
@ -15,20 +15,31 @@
|
||||
#include "SkScalar.h"
|
||||
#include "SkPoint.h"
|
||||
#include "SkRect.h"
|
||||
#include "SkMatrix.h"
|
||||
#include "SkRegion.h"
|
||||
|
||||
class SkStream;
|
||||
class SkWStream;
|
||||
|
||||
class SkWriter32 : SkNoncopyable {
|
||||
public:
|
||||
/**
|
||||
* The caller can specify an initial block of storage, which the caller manages.
|
||||
* SkWriter32 will not attempt to free this in its destructor. It is up to the
|
||||
* implementation to decide if, and how much, of the storage to utilize, and it
|
||||
* is possible that it may be ignored entirely.
|
||||
*/
|
||||
SkWriter32(size_t minSize, void* initialStorage, size_t storageSize);
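A small sketch of the external-storage constructor documented above; the sizes and the particular write calls are illustrative:

    uint32_t storage[64];                      // caller-managed block, e.g. on the stack
    SkWriter32 writer(128, storage, sizeof(storage));
    writer.writeBool(true);
    writer.writeRect(SkRect::MakeWH(SkIntToScalar(16), SkIntToScalar(9)));
    // small records stay in 'storage'; larger ones spill into heap-allocated blocks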
|
||||
|
||||
SkWriter32(size_t minSize)
|
||||
: fMinSize(minSize),
|
||||
fSize(0),
|
||||
fSingleBlock(NULL),
|
||||
fSingleBlockSize(0),
|
||||
fHead(NULL),
|
||||
fTail(NULL) {
|
||||
}
|
||||
fTail(NULL),
|
||||
fHeadIsExternalStorage(false) {}
|
||||
|
||||
~SkWriter32();
|
||||
|
||||
/**
|
||||
@ -43,7 +54,7 @@ public:
|
||||
* dynamic allocation (and resets).
|
||||
*/
|
||||
void reset(void* block, size_t size);
|
||||
|
||||
|
||||
bool writeBool(bool value) {
|
||||
this->writeInt(value);
|
||||
return value;
|
||||
@ -76,7 +87,19 @@ public:
|
||||
void writeRect(const SkRect& rect) {
|
||||
*(SkRect*)this->reserve(sizeof(rect)) = rect;
|
||||
}
|
||||
|
||||
void writeMatrix(const SkMatrix& matrix) {
|
||||
size_t size = matrix.flatten(NULL);
|
||||
SkASSERT(SkAlign4(size) == size);
|
||||
matrix.flatten(this->reserve(size));
|
||||
}
|
||||
|
||||
void writeRegion(const SkRegion& rgn) {
|
||||
size_t size = rgn.flatten(NULL);
|
||||
SkASSERT(SkAlign4(size) == size);
|
||||
rgn.flatten(this->reserve(size));
|
||||
}
|
||||
|
||||
// write count bytes (must be a multiple of 4)
|
||||
void writeMul4(const void* values, size_t size) {
|
||||
this->write(values, size);
|
||||
@ -136,11 +159,13 @@ private:
|
||||
|
||||
char* fSingleBlock;
|
||||
uint32_t fSingleBlockSize;
|
||||
|
||||
|
||||
struct Block;
|
||||
Block* fHead;
|
||||
Block* fTail;
|
||||
|
||||
bool fHeadIsExternalStorage;
|
||||
|
||||
Block* newBlock(size_t bytes);
|
||||
};
|
||||
|
||||
|
@ -172,7 +172,7 @@ public:
|
||||
return AsMode(xfer, mode);
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
protected:
|
||||
SkXfermode(SkFlattenableReadBuffer& rb) : SkFlattenable(rb) {}
|
||||
|
||||
@ -214,12 +214,11 @@ public:
|
||||
virtual void xferA8(SkAlpha dst[], const SkPMColor src[], int count,
|
||||
const SkAlpha aa[]) SK_OVERRIDE;
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkProcXfermode)
|
||||
|
||||
protected:
|
||||
SkProcXfermode(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// allow subclasses to update this after we unflatten
|
||||
void setProc(SkXfermodeProc proc) {
|
||||
@ -229,9 +228,6 @@ protected:
|
||||
private:
|
||||
SkXfermodeProc fProc;
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkProcXfermode, (buffer)); }
|
||||
|
||||
typedef SkXfermode INHERITED;
|
||||
};
|
||||
|
||||
|
@ -59,21 +59,15 @@ public:
|
||||
// override from SkPathEffect
|
||||
virtual bool filterPath(SkPath*, const SkPath&, SkScalar* width) SK_OVERRIDE;
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkPath1DPathEffect, (buffer));
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPath1DPathEffect)
|
||||
|
||||
protected:
|
||||
SkPath1DPathEffect(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// overrides from Sk1DPathEffect
|
||||
virtual SkScalar begin(SkScalar contourLength) SK_OVERRIDE;
|
||||
virtual SkScalar next(SkPath*, SkScalar distance, SkPathMeasure&) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
|
||||
private:
|
||||
SkPath fPath; // copied from constructor
|
||||
|
@ -21,9 +21,7 @@ public:
|
||||
// overrides
|
||||
virtual bool filterPath(SkPath*, const SkPath&, SkScalar* width) SK_OVERRIDE;
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(Sk2DPathEffect)
|
||||
|
||||
protected:
|
||||
/** New virtual, to be overridden by subclasses.
|
||||
@ -46,17 +44,16 @@ protected:
|
||||
|
||||
// protected so that subclasses can call this during unflattening
|
||||
Sk2DPathEffect(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkMatrix fMatrix, fInverse;
|
||||
bool fMatrixIsInvertible;
|
||||
|
||||
// illegal
|
||||
Sk2DPathEffect(const Sk2DPathEffect&);
|
||||
Sk2DPathEffect& operator=(const Sk2DPathEffect&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
friend class Sk2DPathEffectBlitter;
|
||||
typedef SkPathEffect INHERITED;
|
||||
};
|
||||
@ -69,15 +66,12 @@ public:
|
||||
*/
|
||||
SkPath2DPathEffect(const SkMatrix&, const SkPath&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPath2DPathEffect)
|
||||
|
||||
protected:
|
||||
SkPath2DPathEffect(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
virtual void next(const SkPoint&, int u, int v, SkPath* dst) SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
|
@ -51,26 +51,17 @@ public:
|
||||
virtual void xferA8(SkAlpha dst[], const SkPMColor src[], int count,
|
||||
const SkAlpha aa[]) SK_OVERRIDE;
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkAvoidXfermode, (buffer));
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkAvoidXfermode)
|
||||
|
||||
protected:
|
||||
SkAvoidXfermode(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkColor fOpColor;
|
||||
uint32_t fDistMul; // x.14
|
||||
Mode fMode;
|
||||
|
||||
static SkFlattenable* Create(SkFlattenableReadBuffer&);
|
||||
|
||||
typedef SkXfermode INHERITED;
|
||||
};
|
||||
|
||||
|
@ -44,17 +44,11 @@ public:
|
||||
virtual void init(SkCanvas*);
|
||||
virtual bool next(SkCanvas*, SkPaint* paint);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkBlurDrawLooper, (buffer));
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurDrawLooper)
|
||||
|
||||
protected:
|
||||
SkBlurDrawLooper(SkFlattenableReadBuffer&);
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& );
|
||||
virtual Factory getFactory() { return CreateProc; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkMaskFilter* fBlur;
|
||||
|
@ -17,19 +17,14 @@ public:
|
||||
|
||||
virtual bool asABlur(SkSize* sigma) const SK_OVERRIDE;
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkBlurImageFilter, (buffer));
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkBlurImageFilter)
|
||||
|
||||
protected:
|
||||
explicit SkBlurImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* offset) SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE { return CreateProc; }
|
||||
|
||||
private:
|
||||
SkSize fSigma;
|
||||
|
@ -55,8 +55,7 @@ public:
|
||||
SkScalar ambient, SkScalar specular,
|
||||
SkScalar blurRadius);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
private:
|
||||
SkBlurMaskFilter(); // can't be instantiated
|
||||
};
|
||||
|
@ -13,7 +13,7 @@
|
||||
#include "SkColorFilter.h"
|
||||
#include "SkColorMatrix.h"
|
||||
|
||||
class SkColorMatrixFilter : public SkColorFilter {
|
||||
class SK_API SkColorMatrixFilter : public SkColorFilter {
|
||||
public:
|
||||
SkColorMatrixFilter();
|
||||
explicit SkColorMatrixFilter(const SkColorMatrix&);
|
||||
@ -28,24 +28,17 @@ public:
|
||||
virtual uint32_t getFlags() SK_OVERRIDE;
|
||||
virtual bool asColorMatrix(SkScalar matrix[20]) SK_OVERRIDE;
|
||||
|
||||
// overrides for SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& buffer) SK_OVERRIDE;
|
||||
|
||||
struct State {
|
||||
int32_t fArray[20];
|
||||
int fShift;
|
||||
int32_t fResult[4];
|
||||
};
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorMatrixFilter)
|
||||
|
||||
protected:
|
||||
// overrides for SkFlattenable
|
||||
virtual Factory getFactory();
|
||||
|
||||
SkColorMatrixFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
|
||||
|
@ -29,18 +29,11 @@ public:
|
||||
// This method is not exported to java.
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width);
|
||||
|
||||
// overrides for SkFlattenable
|
||||
// This method is not exported to java.
|
||||
virtual Factory getFactory();
|
||||
// This method is not exported to java.
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkCornerPathEffect)
|
||||
|
||||
protected:
|
||||
SkCornerPathEffect(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkScalar fRadius;
|
||||
|
@ -18,11 +18,25 @@
*/
class SK_API SkDashPathEffect : public SkPathEffect {
public:
    /** The intervals array must contain an even number of entries (>=2), with the even
        indices specifying the "on" intervals, and the odd indices specifying the "off"
        intervals. phase is an offset into the intervals array (mod the sum of all of the
        intervals).
        Note: only affects framed paths
    /** intervals: array containing an even number of entries (>=2), with
        the even indices specifying the length of "on" intervals, and the odd
        indices specifying the length of "off" intervals.
        count: number of elements in the intervals array
        phase: offset into the intervals array (mod the sum of all of the
        intervals).

        For example: if intervals[] = {10, 20}, count = 2, and phase = 25,
        this will set up a dashed path like so:
            5 pixels off
            10 pixels on
            20 pixels off
            10 pixels on
            20 pixels off
            ...
        A phase of -5, 25, 55, 85, etc. would all result in the same path,
        because the sum of all the intervals is 30.

        Note: only affects stroked paths.
    */
    SkDashPathEffect(const SkScalar intervals[], int count, SkScalar phase, bool scaleToFit = false);
    virtual ~SkDashPathEffect();
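A sketch that builds the exact dash described in the comment above; the paint setup is illustrative:

    SkScalar intervals[] = { SkIntToScalar(10), SkIntToScalar(20) };
    SkPathEffect* dash = new SkDashPathEffect(intervals, 2, SkIntToScalar(25));
    SkPaint paint;
    paint.setStyle(SkPaint::kStroke_Style);    // dashing only applies to stroked paths
    paint.setPathEffect(dash);
    dash->unref();                             // the paint now holds its own reference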
|
||||
@ -34,15 +48,11 @@ public:
|
||||
// overrides for SkFlattenable
|
||||
// This method is not exported to java.
|
||||
virtual Factory getFactory();
|
||||
// This method is not exported to java.
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
|
||||
protected:
|
||||
SkDashPathEffect(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkScalar* fIntervals;
|
||||
|
@ -28,18 +28,11 @@ public:
|
||||
// This method is not exported to java.
|
||||
virtual bool filterPath(SkPath* dst, const SkPath& src, SkScalar* width);
|
||||
|
||||
// overrides for SkFlattenable
|
||||
// This method is not exported to java.
|
||||
virtual Factory getFactory();
|
||||
// This method is not exported to java.
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDiscretePathEffect)
|
||||
|
||||
protected:
|
||||
SkDiscretePathEffect(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkScalar fSegLength, fPerterb;
|
||||
|
@ -34,23 +34,15 @@ public:
|
||||
virtual bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
|
||||
SkIPoint* margin);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
|
||||
// This method is not exported to java.
|
||||
virtual Factory getFactory();
|
||||
// This method is not exported to java.
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkEmbossMaskFilter)
|
||||
|
||||
protected:
|
||||
SkEmbossMaskFilter(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
Light fLight;
|
||||
SkScalar fBlurRadius;
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
typedef SkMaskFilter INHERITED;
|
||||
};
|
||||
|
@ -113,7 +113,7 @@ public:
|
||||
const SkColor colors[], const SkScalar pos[],
|
||||
int count, SkUnitMapper* mapper = NULL);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -131,20 +131,14 @@ public:
|
||||
*/
|
||||
void removeAllShapes();
|
||||
|
||||
// overrides
|
||||
virtual Factory getFactory();
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
// public for Registrar
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkGroupShape)
|
||||
|
||||
protected:
|
||||
// overrides
|
||||
virtual void onDraw(SkCanvas*);
|
||||
|
||||
SkGroupShape(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
struct Rec {
|
||||
|
@ -23,11 +23,9 @@ public:
|
||||
virtual SkMask::Format getFormat();
|
||||
virtual bool filterMask(SkMask*, const SkMask&, const SkMatrix&, SkIPoint*);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& wb);
|
||||
|
||||
protected:
|
||||
SkKernel33ProcMaskFilter(SkFlattenableReadBuffer& rb);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
int fPercent256;
|
||||
@ -48,16 +46,14 @@ public:
|
||||
// override from SkKernel33ProcMaskFilter
|
||||
virtual uint8_t computeValue(uint8_t* const* srcRows);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& wb);
|
||||
virtual Factory getFactory();
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkKernel33MaskFilter)
|
||||
|
||||
private:
|
||||
int fKernel[3][3];
|
||||
int fShift;
|
||||
|
||||
SkKernel33MaskFilter(SkFlattenableReadBuffer& rb);
|
||||
static SkFlattenable* Create(SkFlattenableReadBuffer& rb);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
typedef SkKernel33ProcMaskFilter INHERITED;
|
||||
};
|
||||
|
@ -41,7 +41,7 @@ public:
|
||||
* - Flags and Color are always computed using the LayerInfo's
|
||||
* fFlagsMask and fColorMode.
|
||||
*/
|
||||
kEntirePaint_Bits = -1
|
||||
kEntirePaint_Bits = -1,
|
||||
|
||||
};
|
||||
typedef int32_t BitFlags;
|
||||
@ -101,19 +101,11 @@ public:
|
||||
virtual void init(SkCanvas*);
|
||||
virtual bool next(SkCanvas*, SkPaint* paint);
|
||||
|
||||
// must be public for Registrar :(
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkLayerDrawLooper, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLayerDrawLooper)
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
|
||||
protected:
|
||||
SkLayerDrawLooper(SkFlattenableReadBuffer&);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& );
|
||||
virtual Factory getFactory() { return CreateProc; }
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
struct Rec {
|
||||
|
@ -32,16 +32,11 @@ public:
|
||||
*/
|
||||
void addLayer(const SkPaint& paint, SkScalar dx, SkScalar dy);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual Factory getFactory();
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkLayerRasterizer)
|
||||
|
||||
protected:
|
||||
SkLayerRasterizer(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// override from SkRasterizer
|
||||
virtual bool onRasterize(const SkPath& path, const SkMatrix& matrix,
|
||||
|
gfx/skia/include/effects/SkMorphologyImageFilter.h (new file, 64 lines)
@ -0,0 +1,64 @@
/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkMorphologyImageFilter_DEFINED
#define SkMorphologyImageFilter_DEFINED

#include "SkImageFilter.h"

class SK_API SkMorphologyImageFilter : public SkImageFilter {
public:
    SkMorphologyImageFilter(int radiusX, int radiusY);

protected:
    SkMorphologyImageFilter(SkFlattenableReadBuffer& buffer);
    virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;

    SkISize radius() const { return fRadius; }

private:
    SkISize fRadius;
    typedef SkImageFilter INHERITED;
};

class SK_API SkDilateImageFilter : public SkMorphologyImageFilter {
public:
    SkDilateImageFilter(int radiusX, int radiusY) : INHERITED(radiusX, radiusY) {}

    virtual bool asADilate(SkISize* radius) const SK_OVERRIDE;
    virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
                               SkBitmap* result, SkIPoint* offset) SK_OVERRIDE;

    SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDilateImageFilter)

protected:
    SkDilateImageFilter(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}

private:
    typedef SkMorphologyImageFilter INHERITED;
};

class SK_API SkErodeImageFilter : public SkMorphologyImageFilter {
public:
    SkErodeImageFilter(int radiusX, int radiusY) : INHERITED(radiusX, radiusY) {}

    virtual bool asAnErode(SkISize* radius) const SK_OVERRIDE;
    virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
                               SkBitmap* result, SkIPoint* offset) SK_OVERRIDE;

    SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkErodeImageFilter)

protected:
    SkErodeImageFilter(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}

private:
    typedef SkMorphologyImageFilter INHERITED;
};

#endif
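A brief usage sketch for the filters declared above, assuming SkPaint::setImageFilter() is available in this revision and that the drawing objects ('canvas', 'bitmap') exist in the embedding code:

    // Sketch only: dilate (per-pixel maximum) over a 2x2 pixel radius.
    SkPaint paint;
    SkImageFilter* dilate = new SkDilateImageFilter(2, 2);
    paint.setImageFilter(dilate);              // the paint holds its own reference
    dilate->unref();
    canvas->drawBitmap(bitmap, 0, 0, &paint);

    // SkErodeImageFilter(radiusX, radiusY) is used the same way and takes the
    // per-pixel minimum over the radius instead of the maximum.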
@ -20,7 +20,6 @@ public:
|
||||
virtual void filter(SkPaint*, Type);
|
||||
|
||||
private:
|
||||
uint32_t fPrevFlags; // local cache for filter/restore
|
||||
uint16_t fClearFlags; // user specified
|
||||
uint16_t fSetFlags; // user specified
|
||||
};
|
||||
|
@ -21,27 +21,18 @@ class SkPixelXorXfermode : public SkXfermode {
|
||||
public:
|
||||
SkPixelXorXfermode(SkColor opColor) : fOpColor(opColor) {}
|
||||
|
||||
// override from SkFlattenable
|
||||
virtual Factory getFactory();
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkPixelXorXfermode, (buffer));
|
||||
}
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkPixelXorXfermode)
|
||||
|
||||
protected:
|
||||
SkPixelXorXfermode(SkFlattenableReadBuffer& rb);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// override from SkXfermode
|
||||
virtual SkPMColor xferColor(SkPMColor src, SkPMColor dst);
|
||||
|
||||
private:
|
||||
SkColor fOpColor;
|
||||
|
||||
SkPixelXorXfermode(SkFlattenableReadBuffer& rb);
|
||||
// our private factory
|
||||
static SkFlattenable* Create(SkFlattenableReadBuffer&);
|
||||
|
||||
typedef SkXfermode INHERITED;
|
||||
};
|
||||
|
||||
|
@ -19,11 +19,9 @@ public:
|
||||
SkPaint& paint() { return fPaint; }
|
||||
const SkPaint& paint() const { return fPaint; }
|
||||
|
||||
// overrides
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
protected:
|
||||
SkPaintShape(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkPaint fPaint;
|
||||
@ -40,17 +38,11 @@ public:
|
||||
void setCircle(SkScalar x, SkScalar y, SkScalar radius);
|
||||
void setRRect(const SkRect&, SkScalar rx, SkScalar ry);
|
||||
|
||||
// overrides
|
||||
virtual Factory getFactory();
|
||||
virtual void flatten(SkFlattenableWriteBuffer&);
|
||||
|
||||
// public for Registrar
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer&);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR()
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkRectShape)
|
||||
|
||||
protected:
|
||||
SkRectShape(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
// overrides
|
||||
virtual void onDraw(SkCanvas*);
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
#include "SkColorFilter.h"
|
||||
|
||||
class SkTableColorFilter {
|
||||
class SK_API SkTableColorFilter {
|
||||
public:
|
||||
/**
|
||||
* Create a table colorfilter, copying the table into the filter, and
|
||||
@ -29,6 +29,8 @@ public:
|
||||
const uint8_t tableR[256],
|
||||
const uint8_t tableG[256],
|
||||
const uint8_t tableB[256]);
|
||||
|
||||
SK_DECLARE_FLATTENABLE_REGISTRAR_GROUP()
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -51,13 +51,11 @@ public:
|
||||
virtual SkMask::Format getFormat();
|
||||
virtual bool filterMask(SkMask*, const SkMask&, const SkMatrix&, SkIPoint*);
|
||||
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer& wb);
|
||||
virtual Factory getFactory();
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTableMaskFilter)
|
||||
|
||||
protected:
|
||||
SkTableMaskFilter(SkFlattenableReadBuffer& rb);
|
||||
static SkFlattenable* Factory(SkFlattenableReadBuffer&);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
uint8_t fTable[256];
|
||||
|
@ -3,6 +3,7 @@
|
||||
#define _SkTestImageFilters_h
|
||||
|
||||
#include "SkImageFilter.h"
|
||||
#include "SkColorFilter.h"
|
||||
|
||||
class SkOffsetImageFilter : public SkImageFilter {
|
||||
public:
|
||||
@ -10,19 +11,15 @@ public:
|
||||
fOffset.set(dx, dy);
|
||||
}
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkOffsetImageFilter, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkOffsetImageFilter)
|
||||
|
||||
protected:
|
||||
SkOffsetImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* loc) SK_OVERRIDE;
|
||||
virtual bool onFilterBounds(const SkIRect&, const SkMatrix&, SkIRect*) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkVector fOffset;
|
||||
@ -40,19 +37,15 @@ public:
|
||||
}
|
||||
virtual ~SkComposeImageFilter();
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkComposeImageFilter, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkComposeImageFilter)
|
||||
|
||||
protected:
|
||||
SkComposeImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* loc) SK_OVERRIDE;
|
||||
virtual bool onFilterBounds(const SkIRect&, const SkMatrix&, SkIRect*) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkImageFilter* fOuter;
|
||||
@ -71,19 +64,15 @@ public:
|
||||
const SkXfermode::Mode modes[] = NULL);
|
||||
virtual ~SkMergeImageFilter();
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkMergeImageFilter, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkMergeImageFilter)
|
||||
|
||||
protected:
|
||||
SkMergeImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* loc) SK_OVERRIDE;
|
||||
virtual bool onFilterBounds(const SkIRect&, const SkMatrix&, SkIRect*) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkImageFilter** fFilters;
|
||||
@ -100,8 +89,6 @@ private:
|
||||
typedef SkImageFilter INHERITED;
|
||||
};
|
||||
|
||||
class SkColorFilter;
|
||||
|
||||
class SkColorFilterImageFilter : public SkImageFilter {
|
||||
public:
|
||||
SkColorFilterImageFilter(SkColorFilter* cf) : fColorFilter(cf) {
|
||||
@ -109,18 +96,14 @@ public:
|
||||
}
|
||||
virtual ~SkColorFilterImageFilter();
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkColorFilterImageFilter, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkColorFilterImageFilter)
|
||||
|
||||
protected:
|
||||
SkColorFilterImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* loc) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkColorFilter* fColorFilter;
|
||||
@ -135,18 +118,14 @@ class SkDownSampleImageFilter : public SkImageFilter {
|
||||
public:
|
||||
SkDownSampleImageFilter(SkScalar scale) : fScale(scale) {}
|
||||
|
||||
static SkFlattenable* CreateProc(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkDownSampleImageFilter, (buffer));
|
||||
}
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkDownSampleImageFilter)
|
||||
|
||||
protected:
|
||||
SkDownSampleImageFilter(SkFlattenableReadBuffer& buffer);
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) const SK_OVERRIDE;
|
||||
|
||||
virtual bool onFilterImage(Proxy*, const SkBitmap& src, const SkMatrix&,
|
||||
SkBitmap* result, SkIPoint* loc) SK_OVERRIDE;
|
||||
// overrides from SkFlattenable
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
|
||||
private:
|
||||
SkScalar fScale;
|
||||
|
@ -23,9 +23,7 @@ public:
|
||||
virtual void shadeSpan(int x, int y, SkPMColor[], int count) SK_OVERRIDE;
|
||||
virtual void shadeSpan16(int x, int y, uint16_t span[], int count) SK_OVERRIDE;
|
||||
|
||||
// overrides for SkFlattenable
|
||||
virtual Factory getFactory() SK_OVERRIDE;
|
||||
virtual void flatten(SkFlattenableWriteBuffer&) SK_OVERRIDE;
|
||||
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTransparentShader)
|
||||
|
||||
private:
|
||||
// these are a cache from the call to setContext()
|
||||
@ -34,10 +32,6 @@ private:
|
||||
|
||||
SkTransparentShader(SkFlattenableReadBuffer& buffer) : INHERITED(buffer) {}
|
||||
|
||||
static SkFlattenable* Create(SkFlattenableReadBuffer& buffer) {
|
||||
return SkNEW_ARGS(SkTransparentShader, (buffer));
|
||||
}
|
||||
|
||||
typedef SkShader INHERITED;
|
||||
};
|
||||
|
||||
|
@ -13,9 +13,9 @@
|
||||
|
||||
#include "GrClipIterator.h"
|
||||
#include "GrRect.h"
|
||||
#include "GrPath.h"
|
||||
#include "GrTemplates.h"
|
||||
|
||||
#include "SkPath.h"
|
||||
#include "SkTArray.h"
|
||||
|
||||
class GrClip {
|
||||
@ -39,11 +39,13 @@ public:
|
||||
|
||||
const GrRect& getConservativeBounds() const { return fConservativeBounds; }
|
||||
|
||||
bool requiresAA() const { return fRequiresAA; }
|
||||
|
||||
int getElementCount() const { return fList.count(); }
|
||||
|
||||
GrClipType getElementType(int i) const { return fList[i].fType; }
|
||||
|
||||
const GrPath& getPath(int i) const {
|
||||
const SkPath& getPath(int i) const {
|
||||
GrAssert(kPath_ClipType == fList[i].fType);
|
||||
return fList[i].fPath;
|
||||
}
|
||||
@ -58,12 +60,14 @@ public:
|
||||
return fList[i].fRect;
|
||||
}
|
||||
|
||||
GrSetOp getOp(int i) const { return fList[i].fOp; }
|
||||
SkRegion::Op getOp(int i) const { return fList[i].fOp; }
|
||||
|
||||
bool getDoAA(int i) const { return fList[i].fDoAA; }
|
||||
|
||||
bool isRect() const {
|
||||
if (1 == fList.count() && kRect_ClipType == fList[0].fType &&
|
||||
(kIntersect_SetOp == fList[0].fOp ||
|
||||
kReplace_SetOp == fList[0].fOp)) {
|
||||
(SkRegion::kIntersect_Op == fList[0].fOp ||
|
||||
SkRegion::kReplace_Op == fList[0].fOp)) {
|
||||
// if we determined that the clip is a single rect
|
||||
// we ought to have also used that rect as the bounds.
|
||||
GrAssert(fConservativeBoundsValid);
|
||||
@ -107,13 +111,14 @@ public:
|
||||
|
||||
private:
|
||||
struct Element {
|
||||
GrClipType fType;
|
||||
GrRect fRect;
|
||||
GrPath fPath;
|
||||
GrPathFill fPathFill;
|
||||
GrSetOp fOp;
|
||||
GrClipType fType;
|
||||
GrRect fRect;
|
||||
SkPath fPath;
|
||||
GrPathFill fPathFill;
|
||||
SkRegion::Op fOp;
|
||||
bool fDoAA;
|
||||
bool operator ==(const Element& e) const {
|
||||
if (e.fType != fType || e.fOp != fOp) {
|
||||
if (e.fType != fType || e.fOp != fOp || e.fDoAA != fDoAA) {
|
||||
return false;
|
||||
}
|
||||
switch (fType) {
|
||||
@ -132,6 +137,8 @@ private:
|
||||
GrRect fConservativeBounds;
|
||||
bool fConservativeBoundsValid;
|
||||
|
||||
bool fRequiresAA;
|
||||
|
||||
enum {
|
||||
kPreAllocElements = 4,
|
||||
};
|
||||
|
@ -11,8 +11,9 @@
|
||||
#ifndef GrClipIterator_DEFINED
|
||||
#define GrClipIterator_DEFINED
|
||||
|
||||
#include "GrPath.h"
|
||||
#include "GrRect.h"
|
||||
#include "SkPath.h"
|
||||
#include "SkRegion.h"
|
||||
|
||||
/**
|
||||
* A clip is a list of paths and/or rects with set operations to combine them.
|
||||
@ -40,7 +41,7 @@ public:
|
||||
* Return the current path. It is an error to call this when isDone() is
|
||||
* true or when getType() is kRect_Type.
|
||||
*/
|
||||
virtual const GrPath* getPath() = 0;
|
||||
virtual const SkPath* getPath() = 0;
|
||||
|
||||
/**
|
||||
* Return the fill rule for the path. It is an error to call this when
|
||||
@ -58,7 +59,12 @@ public:
|
||||
* Gets the operation used to apply the current item to previously iterated
|
||||
* items. Iterators should not produce a Replace op.
|
||||
*/
|
||||
virtual GrSetOp getOp() const = 0;
|
||||
virtual SkRegion::Op getOp() const = 0;
|
||||
|
||||
/**
|
||||
* Gets anti-aliasing setting desired for the current clip
|
||||
*/
|
||||
virtual bool getDoAA() const = 0;
|
||||
|
||||
/**
|
||||
* Call to move to the next element in the list, previous path iter can be
|
||||
|
@ -182,7 +182,7 @@ typedef unsigned __int64 uint64_t;
|
||||
// debug -vs- release
|
||||
//
|
||||
|
||||
extern GR_API void GrPrintf(const char format[], ...);
|
||||
#define GrPrintf SkDebugf
|
||||
|
||||
/**
|
||||
* GR_STRING makes a string of X where X is expanded before conversion to a string
|
||||
@ -364,23 +364,6 @@ inline void GrCrash(const char* msg) { GrPrintf(msg); GrAlwaysAssert(false); }
|
||||
#define GR_GEOM_BUFFER_LOCK_THRESHOLD (1 << 15)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Enables/disables use of offscreen AA
|
||||
*/
|
||||
#if !defined(GR_USE_OFFSCREEN_AA)
|
||||
#define GR_USE_OFFSCREEN_AA 1
|
||||
#endif
|
||||
|
||||
/**
|
||||
* GR_MAX_OFFSCREEN_AA_SIZE controls the size at which offscreen AA will tile.
|
||||
* Tiling saves GPU memory by limiting the size of the offscreen buffer. The
|
||||
* max offscreen may be as large as (4*GR_MAX_OFFSCREEN_AA_SIZE)^2 pixels.
|
||||
*/
|
||||
#if !defined(GR_MAX_OFFSCREEN_AA_SIZE)
|
||||
#define GR_MAX_OFFSCREEN_AA_SIZE 256
|
||||
#endif
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// tail section:
|
||||
//
|
||||
|
@ -16,6 +16,8 @@
|
||||
// remove.
|
||||
#include "GrRenderTarget.h"
|
||||
|
||||
class GrAutoScratchTexture;
|
||||
class GrDrawState;
|
||||
class GrDrawTarget;
|
||||
class GrFontCache;
|
||||
class GrGpu;
|
||||
@ -30,6 +32,7 @@ class GrResourceCache;
|
||||
class GrStencilBuffer;
|
||||
class GrVertexBuffer;
|
||||
class GrVertexBufferAllocPool;
|
||||
class GrSoftwarePathRenderer;
|
||||
|
||||
class GR_API GrContext : public GrRefCnt {
|
||||
public:
|
||||
@ -75,6 +78,11 @@ public:
|
||||
*/
|
||||
void freeGpuResources();
|
||||
|
||||
/**
|
||||
* Returns the number of bytes of GPU memory hosted by the texture cache.
|
||||
*/
|
||||
size_t getGpuTextureCacheBytes() const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Textures
|
||||
|
||||
@ -82,7 +90,7 @@ public:
|
||||
* Token that refers to an entry in the texture cache. Returned by
|
||||
* functions that lock textures. Passed to unlockTexture.
|
||||
*/
|
||||
class TextureCacheEntry {
|
||||
class SK_API TextureCacheEntry {
|
||||
public:
|
||||
TextureCacheEntry() : fEntry(NULL) {}
|
||||
TextureCacheEntry(const TextureCacheEntry& e) : fEntry(e.fEntry) {}
|
||||
@ -267,6 +275,11 @@ public:
|
||||
const GrRenderTarget* getRenderTarget() const;
|
||||
GrRenderTarget* getRenderTarget();
|
||||
|
||||
/**
|
||||
* Can the provided configuration act as a color render target?
|
||||
*/
|
||||
bool isConfigRenderable(GrPixelConfig config) const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Platform Surfaces
|
||||
|
||||
@ -295,26 +308,6 @@ public:
|
||||
GrRenderTarget* createPlatformRenderTarget(
|
||||
const GrPlatformRenderTargetDesc& desc);
|
||||
|
||||
/**
|
||||
* This interface is deprecated and will be removed in a future revision.
|
||||
* Callers should use createPlatformTexture or createPlatformRenderTarget
|
||||
* instead.
|
||||
*
|
||||
* Wraps an existing 3D API surface in a GrObject. desc.fFlags determines
|
||||
* the type of object returned. If kIsTexture is set the returned object
|
||||
* will be a GrTexture*. Otherwise, it will be a GrRenderTarget*. If both
|
||||
* are set the render target object is accessible by
|
||||
* GrTexture::asRenderTarget().
|
||||
*
|
||||
* GL: if the object is a texture Gr may change its GL texture parameters
|
||||
* when it is drawn.
|
||||
*
|
||||
* @param desc description of the object to create.
|
||||
* @return either a GrTexture* or GrRenderTarget* depending on desc. NULL
|
||||
* on failure.
|
||||
*/
|
||||
GrResource* createPlatformSurface(const GrPlatformSurfaceDesc& desc);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Matrix state
|
||||
|
||||
@ -419,7 +412,7 @@ public:
|
||||
* @param translate optional additional translation applied to the
|
||||
* path.
|
||||
*/
|
||||
void drawPath(const GrPaint& paint, const GrPath& path, GrPathFill fill,
|
||||
void drawPath(const GrPaint& paint, const SkPath& path, GrPathFill fill,
|
||||
const GrPoint* translate = NULL);
|
||||
|
||||
/**
|
||||
@ -447,14 +440,23 @@ public:
|
||||
const uint16_t indices[],
|
||||
int indexCount);
|
||||
|
||||
/**
|
||||
* Draws an oval.
|
||||
*
|
||||
* @param paint describes how to color pixels.
|
||||
* @param rect the bounding rect of the oval.
|
||||
* @param strokeWidth if strokeWidth < 0, then the oval is filled, else
|
||||
* the rect is stroked based on strokeWidth. If
|
||||
* strokeWidth == 0, then the stroke is always a single
|
||||
* pixel thick.
|
||||
*/
|
||||
void drawOval(const GrPaint& paint,
|
||||
const GrRect& rect,
|
||||
SkScalar strokeWidth);
|
||||
|
||||
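A short sketch of the strokeWidth convention documented above ('context', 'grPaint', and 'bounds' are assumed to exist in the caller):

    // Sketch only: the sign of strokeWidth selects fill, hairline, or stroke.
    context->drawOval(grPaint, bounds, -SK_Scalar1);        // strokeWidth < 0: filled
    context->drawOval(grPaint, bounds, 0);                   // strokeWidth == 0: hairline
    context->drawOval(grPaint, bounds, SkIntToScalar(4));    // 4-pixel-wide stroke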
///////////////////////////////////////////////////////////////////////////
|
||||
// Misc.
|
||||
|
||||
/**
|
||||
* Currently needed by SkGpuDevice. Ideally this shouldn't be exposed.
|
||||
*/
|
||||
bool supportsShaders() const;
|
||||
|
||||
/**
|
||||
* Flags that affect flush() behavior.
|
||||
*/
|
||||
@ -582,31 +584,57 @@ public:
|
||||
* @param dst the render target to copy to.
|
||||
*/
|
||||
void copyTexture(GrTexture* src, GrRenderTarget* dst);
|
||||
|
||||
/**
|
||||
* Applies a 1D convolution kernel in the X direction to a rectangle of
|
||||
* pixels from a given texture.
|
||||
* @param texture the texture to read from
|
||||
* @param rect the destination rectangle
|
||||
* @param kernel the convolution kernel (kernelWidth elements)
|
||||
* @param kernelWidth the width of the convolution kernel
|
||||
* Resolves a render target that has MSAA. The intermediate MSAA buffer is
|
||||
* downsampled to the associated GrTexture (accessible via
|
||||
* GrRenderTarget::asTexture()). Any pending draws to the render target will
|
||||
* be executed before the resolve.
|
||||
*
|
||||
* This is only necessary when a client wants to access the object directly
|
||||
* using the underlying graphics API. GrContext will detect when it must
|
||||
* perform a resolve to a GrTexture used as the source of a draw or before
|
||||
* reading pixels back from a GrTexture or GrRenderTarget.
|
||||
*/
|
||||
void convolveInX(GrTexture* texture,
|
||||
const SkRect& rect,
|
||||
const float* kernel,
|
||||
int kernelWidth);
|
||||
void resolveRenderTarget(GrRenderTarget* target);
|
||||
|
||||
/**
|
||||
* Applies a 1D convolution kernel in the Y direction to a rectangle of
|
||||
* pixels from a given texture.
|
||||
* direction.
|
||||
* @param texture the texture to read from
|
||||
* @param rect the destination rectangle
|
||||
* @param kernel the convolution kernel (kernelWidth elements)
|
||||
* @param kernelWidth the width of the convolution kernel
|
||||
* Applies a 2D Gaussian blur to a given texture.
|
||||
* @param srcTexture The source texture to be blurred.
|
||||
* @param temp1 A scratch texture. Must not be NULL.
|
||||
* @param temp2 A scratch texture. May be NULL, in which case
|
||||
* srcTexture is overwritten with intermediate
|
||||
* results.
|
||||
* @param rect The destination rectangle.
|
||||
* @param sigmaX The blur's standard deviation in X.
|
||||
* @param sigmaY The blur's standard deviation in Y.
|
||||
* @return the blurred texture, which may be temp1, temp2 or srcTexture.
|
||||
*/
|
||||
void convolveInY(GrTexture* texture,
|
||||
const SkRect& rect,
|
||||
const float* kernel,
|
||||
int kernelWidth);
|
||||
GrTexture* gaussianBlur(GrTexture* srcTexture,
|
||||
GrAutoScratchTexture* temp1,
|
||||
GrAutoScratchTexture* temp2,
|
||||
const SkRect& rect,
|
||||
float sigmaX, float sigmaY);
|
||||
|
||||
/**
|
||||
* Applies a 2D morphology to a given texture.
|
||||
* @param srcTexture The source texture to be blurred.
|
||||
* @param rect The destination rectangle.
|
||||
* @param temp1 A scratch texture. Must not be NULL.
|
||||
* @param temp2 A scratch texture. Must not be NULL.
|
||||
* @param filter The morphology filter. Must be kDilate_Filter or
|
||||
* kErode_Filter.
|
||||
* @param radius The morphology radius in X and Y. The filter is
|
||||
* applied to a fWidth by fHeight rectangle of
|
||||
* pixels.
|
||||
* @return the morphed texture, which may be temp1, temp2 or srcTexture.
|
||||
*/
|
||||
GrTexture* applyMorphology(GrTexture* srcTexture,
|
||||
const GrRect& rect,
|
||||
GrTexture* temp1, GrTexture* temp2,
|
||||
GrSamplerState::Filter filter,
|
||||
SkISize radius);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Helpers
|
||||
|
||||
@ -637,7 +665,6 @@ public:
|
||||
const GrGpu* getGpu() const { return fGpu; }
|
||||
GrFontCache* getFontCache() { return fFontCache; }
|
||||
GrDrawTarget* getTextTarget(const GrPaint& paint);
|
||||
void flushText();
|
||||
const GrIndexBuffer* getQuadIndexBuffer() const;
|
||||
void resetStats();
|
||||
const GrGpuStats& getStats() const;
|
||||
@ -654,20 +681,40 @@ public:
|
||||
void unlockStencilBuffer(GrResourceEntry* sbEntry);
|
||||
GrStencilBuffer* findStencilBuffer(int width, int height, int sampleCnt);
|
||||
|
||||
/*
|
||||
* postClipPush acts as a hint to this and lower-level classes (e.g.,
|
||||
* GrGpu) that the clip stack has changed.
|
||||
*/
|
||||
virtual void postClipPush();
|
||||
|
||||
/*
|
||||
* preClipPop acts as a hint that the clip stack has been restored to an
|
||||
* earlier state.
|
||||
*/
|
||||
virtual void preClipPop();
|
||||
|
||||
GrPathRenderer* getPathRenderer(const SkPath& path,
|
||||
GrPathFill fill,
|
||||
const GrDrawTarget* target,
|
||||
bool antiAlias,
|
||||
bool allowSW);
|
||||
|
||||
private:
|
||||
// used to keep track of when we need to flush the draw buffer
|
||||
enum DrawCategory {
|
||||
kBuffered_DrawCategory, // last draw was inserted in draw buffer
|
||||
kUnbuffered_DrawCategory, // last draw was not inserted in the draw buffer
|
||||
kText_DrawCategory // text context was last to draw
|
||||
};
|
||||
DrawCategory fLastDrawCategory;
|
||||
|
||||
GrGpu* fGpu;
|
||||
GrDrawState* fDrawState;
|
||||
|
||||
GrResourceCache* fTextureCache;
|
||||
GrFontCache* fFontCache;
|
||||
|
||||
GrPathRendererChain* fPathRendererChain;
|
||||
GrSoftwarePathRenderer* fSoftwarePathRenderer;
|
||||
|
||||
GrVertexBufferAllocPool* fDrawBufferVBAllocPool;
|
||||
GrIndexBufferAllocPool* fDrawBufferIBAllocPool;
|
||||
@ -675,7 +722,6 @@ private:
|
||||
|
||||
GrIndexBuffer* fAAFillRectIndexBuffer;
|
||||
GrIndexBuffer* fAAStrokeRectIndexBuffer;
|
||||
int fMaxOffscreenAASize;
|
||||
|
||||
GrContext(GrGpu* gpu);
|
||||
|
||||
@ -698,52 +744,12 @@ private:
|
||||
|
||||
void flushDrawBuffer();
|
||||
|
||||
void setPaint(const GrPaint& paint, GrDrawTarget* target);
|
||||
void setPaint(const GrPaint& paint);
|
||||
|
||||
GrDrawTarget* prepareToDraw(const GrPaint& paint, DrawCategory drawType);
|
||||
|
||||
GrPathRenderer* getPathRenderer(const GrPath& path,
|
||||
GrPathFill fill,
|
||||
bool antiAlias);
|
||||
|
||||
struct OffscreenRecord;
|
||||
|
||||
// determines whether offscreen AA should be applied
|
||||
bool doOffscreenAA(GrDrawTarget* target,
|
||||
bool isHairLines) const;
|
||||
|
||||
// attempts to setup offscreen AA. All paint state must be transferred to
|
||||
// target by the time this is called.
|
||||
bool prepareForOffscreenAA(GrDrawTarget* target,
|
||||
bool requireStencil,
|
||||
const GrIRect& boundRect,
|
||||
GrPathRenderer* pr,
|
||||
OffscreenRecord* record);
|
||||
|
||||
// sets up target to draw coverage to the supersampled render target
|
||||
void setupOffscreenAAPass1(GrDrawTarget* target,
|
||||
const GrIRect& boundRect,
|
||||
int tileX, int tileY,
|
||||
OffscreenRecord* record);
|
||||
|
||||
// sets up target to sample coverage of supersampled render target back
|
||||
// to the main render target using stage kOffscreenStage.
|
||||
void doOffscreenAAPass2(GrDrawTarget* target,
|
||||
const GrPaint& paint,
|
||||
const GrIRect& boundRect,
|
||||
int tileX, int tileY,
|
||||
OffscreenRecord* record);
|
||||
|
||||
// restored the draw target state and releases offscreen target to cache
|
||||
void cleanupOffscreenAA(GrDrawTarget* target,
|
||||
GrPathRenderer* pr,
|
||||
OffscreenRecord* record);
|
||||
|
||||
void convolve(GrTexture* texture,
|
||||
const SkRect& rect,
|
||||
float imageIncrement[2],
|
||||
const float* kernel,
|
||||
int kernelWidth);
|
||||
void internalDrawPath(const GrPaint& paint, const SkPath& path,
|
||||
GrPathFill fill, const GrPoint* translate);
|
||||
|
||||
/**
|
||||
* Flags to the internal read/write pixels funcs
|
||||
@ -862,6 +868,7 @@ public:
|
||||
GrContext::kApprox_ScratchTexMatch) {
|
||||
if (NULL != fContext) {
|
||||
fContext->unlockTexture(fEntry);
|
||||
fEntry.reset();
|
||||
}
|
||||
fContext = context;
|
||||
if (NULL != fContext) {
|
||||
@ -883,4 +890,3 @@ private:
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
|
gfx/skia/include/gpu/GrContextFactory.h (new file, 120 lines)
@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright 2012 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef GrContextFactory_DEFINED
|
||||
#define GrContextFactory_DEFINED
|
||||
|
||||
#if SK_ANGLE
|
||||
#include "gl/SkANGLEGLContext.h"
|
||||
#endif
|
||||
#include "gl/SkDebugGLContext.h"
|
||||
#if SK_MESA
|
||||
#include "gl/SkMesaGLContext.h"
|
||||
#endif
|
||||
#include "gl/SkNativeGLContext.h"
|
||||
#include "gl/SkNullGLContext.h"
|
||||
|
||||
#include "GrContext.h"
|
||||
|
||||
/**
|
||||
* This is a simple class that is useful in test apps that use different
|
||||
* GrContexts backed by different types of GL contexts. It manages creating the
|
||||
* GL context and a GrContext that uses it. The GL/Gr contexts persist until the
|
||||
* factory is destroyed (though the caller can always grab a ref on the returned
|
||||
* GrContext to make it outlive the factory).
|
||||
*/
|
||||
class GrContextFactory : GrNoncopyable {
|
||||
public:
|
||||
/**
|
||||
* Types of GL contexts supported.
|
||||
*/
|
||||
enum GLContextType {
|
||||
kNative_GLContextType,
|
||||
#if SK_ANGLE
|
||||
kANGLE_GLContextType,
|
||||
#endif
|
||||
#if SK_MESA
|
||||
kMESA_GLContextType,
|
||||
#endif
|
||||
kNull_GLContextType,
|
||||
kDebug_GLContextType,
|
||||
};
|
||||
|
||||
GrContextFactory() {
|
||||
}
|
||||
|
||||
~GrContextFactory() {
|
||||
for (int i = 0; i < fContexts.count(); ++i) {
|
||||
fContexts[i].fGrContext->unref();
|
||||
fContexts[i].fGLContext->unref();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a GrContext initialized with a type of GL context.
|
||||
*/
|
||||
GrContext* get(GLContextType type) {
|
||||
|
||||
for (int i = 0; i < fContexts.count(); ++i) {
|
||||
if (fContexts[i].fType == type) {
|
||||
return fContexts[i].fGrContext;
|
||||
}
|
||||
}
|
||||
SkAutoTUnref<SkGLContext> glCtx;
|
||||
SkAutoTUnref<GrContext> grCtx;
|
||||
switch (type) {
|
||||
case kNative_GLContextType:
|
||||
glCtx.reset(new SkNativeGLContext());
|
||||
break;
|
||||
#ifdef SK_ANGLE
|
||||
case kANGLE_GLContextType:
|
||||
glCtx.reset(new SkANGLEGLContext());
|
||||
break;
|
||||
#endif
|
||||
#ifdef SK_MESA
|
||||
case kMESA_GLContextType:
|
||||
glCtx.reset(new SkMesaGLContext());
|
||||
break;
|
||||
#endif
|
||||
case kNull_GLContextType:
|
||||
glCtx.reset(new SkNullGLContext());
|
||||
break;
|
||||
case kDebug_GLContextType:
|
||||
glCtx.reset(new SkDebugGLContext());
|
||||
break;
|
||||
}
|
||||
static const int kBogusSize = 1;
|
||||
if (!glCtx.get()) {
|
||||
return NULL;
|
||||
}
|
||||
if (!glCtx.get()->init(kBogusSize, kBogusSize)) {
|
||||
return NULL;
|
||||
}
|
||||
GrPlatform3DContext p3dctx =
|
||||
reinterpret_cast<GrPlatform3DContext>(glCtx.get()->gl());
|
||||
grCtx.reset(GrContext::Create(kOpenGL_Shaders_GrEngine, p3dctx));
|
||||
if (!grCtx.get()) {
|
||||
return NULL;
|
||||
}
|
||||
GPUContext& ctx = fContexts.push_back();
|
||||
ctx.fGLContext = glCtx.get();
|
||||
ctx.fGLContext->ref();
|
||||
ctx.fGrContext = grCtx.get();
|
||||
ctx.fGrContext->ref();
|
||||
ctx.fType = type;
|
||||
return ctx.fGrContext;
|
||||
}
|
||||
private:
|
||||
struct GPUContext {
|
||||
GLContextType fType;
|
||||
SkGLContext* fGLContext;
|
||||
GrContext* fGrContext;
|
||||
};
|
||||
SkTArray<GPUContext, true> fContexts;
|
||||
};
|
||||
|
||||
#endif
|
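A short sketch of how the factory above is meant to be used in a test harness (the GL type chosen here is just an example):

    // Sketch only: contexts are created lazily and cached for the factory's lifetime.
    GrContextFactory factory;
    GrContext* ctx = factory.get(GrContextFactory::kNative_GLContextType);
    if (NULL != ctx) {
        // ctx stays valid until 'factory' is destroyed; ref() it to keep it longer.
    }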
gfx/skia/include/gpu/GrCustomStage.h (new file, 71 lines)
@ -0,0 +1,71 @@
/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCustomStage_DEFINED
#define GrCustomStage_DEFINED

#include "GrRefCnt.h"
#include "GrNoncopyable.h"
#include "GrProgramStageFactory.h"

class GrContext;

/** Provides custom vertex shader, fragment shader, uniform data for a
    particular stage of the Ganesh shading pipeline.
    Subclasses must have a function that produces a human-readable name:
        static const char* Name();
 */
class GrCustomStage : public GrRefCnt {

public:
    typedef GrProgramStageFactory::StageKey StageKey;

    GrCustomStage();
    virtual ~GrCustomStage();

    /** If given an input texture that is/is not opaque, is this
        stage guaranteed to produce an opaque output? */
    virtual bool isOpaque(bool inputTextureIsOpaque) const;

    /** This object, besides creating back-end-specific helper
        objects, is used for run-time-type-identification. The factory should be
        an instance of templated class, GrTProgramStageFactory. It is templated
        on the subclass of GrCustomStage. The subclass must have a nested type
        (or typedef) named GLProgramStage which will be the subclass of
        GrGLProgramStage created by the factory.

        Example:
        class MyCustomStage : public GrCustomStage {
        ...
            virtual const GrProgramStageFactory& getFactory() const
                                                            SK_OVERRIDE {
                return GrTProgramStageFactory<MyCustomStage>::getInstance();
            }
        ...
        };
     */
    virtual const GrProgramStageFactory& getFactory() const = 0;

    /** Returns true if the other custom stage will generate
        equal output.
        Must only be called if the two are already known to be of the
        same type (i.e. they return the same value from getFactory()).
        For equivalence (that they will generate the same
        shader, but perhaps have different uniforms), check equality
        of the stageKey produced by the GrProgramStageFactory. */
    virtual bool isEqual(const GrCustomStage *) const = 0;

    /** Human-meaningful string to identify this effect; may be embedded
        in generated shader code. */
    const char* name() const { return this->getFactory().name(); }

private:

    typedef GrRefCnt INHERITED;
};

#endif
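Expanding the Example in the class comment above into a fuller sketch (MyCustomStage, MyGLStage, and their members are hypothetical and not part of this commit):

    // Sketch only: the minimum a subclass needs to participate in shader caching.
    class MyCustomStage : public GrCustomStage {
    public:
        typedef MyGLStage GLProgramStage;                  // nested GL implementation type
        static const char* Name() { return "MyCustomStage"; }

        virtual const GrProgramStageFactory& getFactory() const SK_OVERRIDE {
            return GrTProgramStageFactory<MyCustomStage>::getInstance();
        }
        virtual bool isEqual(const GrCustomStage* other) const SK_OVERRIDE {
            // Only called when 'other' is already known to be a MyCustomStage.
            return true;                                    // no per-instance state here
        }
    };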
@ -11,8 +11,8 @@
|
||||
#ifndef GrGlyph_DEFINED
|
||||
#define GrGlyph_DEFINED
|
||||
|
||||
#include "GrPath.h"
|
||||
#include "GrRect.h"
|
||||
#include "SkPath.h"
|
||||
|
||||
class GrAtlas;
|
||||
|
||||
@ -26,7 +26,7 @@ struct GrGlyph {
|
||||
typedef uint32_t PackedID;
|
||||
|
||||
GrAtlas* fAtlas;
|
||||
GrPath* fPath;
|
||||
SkPath* fPath;
|
||||
PackedID fPackedID;
|
||||
GrIRect16 fBounds;
|
||||
GrIPoint16 fAtlasLocation;
|
||||
|
@ -36,6 +36,7 @@ public:
|
||||
bool fColorMatrixEnabled;
|
||||
|
||||
GrColor fColor;
|
||||
uint8_t fCoverage;
|
||||
|
||||
GrColor fColorFilterColor;
|
||||
SkXfermode::Mode fColorFilterXfermode;
|
||||
@ -126,23 +127,30 @@ public:
|
||||
fDither = paint.fDither;
|
||||
|
||||
fColor = paint.fColor;
|
||||
fCoverage = paint.fCoverage;
|
||||
|
||||
fColorFilterColor = paint.fColorFilterColor;
|
||||
fColorFilterXfermode = paint.fColorFilterXfermode;
|
||||
memcpy(fColorMatrix, paint.fColorMatrix, sizeof(fColorMatrix));
|
||||
fColorMatrixEnabled = paint.fColorMatrixEnabled;
|
||||
|
||||
if (fColorMatrixEnabled) {
|
||||
memcpy(fColorMatrix, paint.fColorMatrix, sizeof(fColorMatrix));
|
||||
}
|
||||
|
||||
for (int i = 0; i < kMaxTextures; ++i) {
|
||||
GrSafeUnref(fTextures[i]);
|
||||
fTextureSamplers[i] = paint.fTextureSamplers[i];
|
||||
fTextures[i] = paint.fTextures[i];
|
||||
GrSafeRef(fTextures[i]);
|
||||
if (NULL != fTextures[i]) {
|
||||
fTextureSamplers[i] = paint.fTextureSamplers[i];
|
||||
fTextures[i]->ref();
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < kMaxMasks; ++i) {
|
||||
GrSafeUnref(fMaskTextures[i]);
|
||||
fMaskSamplers[i] = paint.fMaskSamplers[i];
|
||||
fMaskTextures[i] = paint.fMaskTextures[i];
|
||||
GrSafeRef(fMaskTextures[i]);
|
||||
if (NULL != fMaskTextures[i]) {
|
||||
fMaskSamplers[i] = paint.fMaskSamplers[i];
|
||||
fMaskTextures[i]->ref();
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
@ -161,6 +169,7 @@ public:
|
||||
this->resetBlend();
|
||||
this->resetOptions();
|
||||
this->resetColor();
|
||||
this->resetCoverage();
|
||||
this->resetTextures();
|
||||
this->resetColorFilter();
|
||||
this->resetMasks();
|
||||
@ -169,7 +178,6 @@ public:
|
||||
void resetColorFilter() {
|
||||
fColorFilterXfermode = SkXfermode::kDst_Mode;
|
||||
fColorFilterColor = GrColorPackRGBA(0xff, 0xff, 0xff, 0xff);
|
||||
memset(fColorMatrix, 0, sizeof(fColorMatrix));
|
||||
fColorMatrixEnabled = false;
|
||||
}
|
||||
|
||||
@ -242,6 +250,10 @@ private:
|
||||
fColor = GrColorPackRGBA(0xff, 0xff, 0xff, 0xff);
|
||||
}
|
||||
|
||||
void resetCoverage() {
|
||||
fCoverage = 0xff;
|
||||
}
|
||||
|
||||
void resetTextures() {
|
||||
for (int i = 0; i < kMaxTextures; ++i) {
|
||||
this->setTexture(i, NULL);
|
||||
|
gfx/skia/include/gpu/GrProgramStageFactory.h (new file, 118 lines)
@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Copyright 2012 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef GrProgramStageFactory_DEFINED
|
||||
#define GrProgramStageFactory_DEFINED
|
||||
|
||||
#include "GrTypes.h"
|
||||
#include "SkTemplates.h"
|
||||
|
||||
/** Given a GrCustomStage of a particular type, creates the corresponding
|
||||
graphics-backend-specific GrProgramStage. Also tracks equivalence
|
||||
of shaders generated via a key.
|
||||
*/
|
||||
|
||||
class GrCustomStage;
|
||||
class GrGLProgramStage;
|
||||
|
||||
class GrProgramStageFactory : public GrNoncopyable {
|
||||
public:
|
||||
typedef uint16_t StageKey;
|
||||
enum {
|
||||
kProgramStageKeyBits = 10,
|
||||
};
|
||||
|
||||
virtual StageKey glStageKey(const GrCustomStage* stage) const = 0;
|
||||
virtual GrGLProgramStage* createGLInstance(
|
||||
const GrCustomStage* stage) const = 0;
|
||||
|
||||
bool operator ==(const GrProgramStageFactory& b) const {
|
||||
return fStageClassID == b.fStageClassID;
|
||||
}
|
||||
bool operator !=(const GrProgramStageFactory& b) const {
|
||||
return !(*this == b);
|
||||
}
|
||||
|
||||
virtual const char* name() const = 0;
|
||||
|
||||
protected:
|
||||
enum {
|
||||
kIllegalStageClassID = 0,
|
||||
};
|
||||
|
||||
GrProgramStageFactory() {
|
||||
fStageClassID = kIllegalStageClassID;
|
||||
}
|
||||
|
||||
static StageKey GenID() {
|
||||
// fCurrStageClassID has been initialized to kIllegalStageClassID. The
|
||||
// atomic inc returns the old value not the incremented value. So we add
|
||||
// 1 to the returned value.
|
||||
int32_t id = sk_atomic_inc(&fCurrStageClassID) + 1;
|
||||
GrAssert(id < (1 << (8 * sizeof(StageKey) - kProgramStageKeyBits)));
|
||||
return id;
|
||||
}
|
||||
|
||||
StageKey fStageClassID;
|
||||
|
||||
private:
|
||||
static int32_t fCurrStageClassID;
|
||||
};
|
||||
|
||||
template <typename StageClass>
|
||||
class GrTProgramStageFactory : public GrProgramStageFactory {
|
||||
|
||||
public:
|
||||
typedef typename StageClass::GLProgramStage GLProgramStage;
|
||||
|
||||
/** Returns a human-readable name that is accessible via GrCustomStage or
|
||||
GrGLProgramStage and is consistent between the two of them.
|
||||
*/
|
||||
virtual const char* name() const SK_OVERRIDE { return StageClass::Name(); }
|
||||
|
||||
/** Returns a value that identifies the GLSL shader code generated by
|
||||
a GrCustomStage. This enables caching of generated shaders. Part of the
|
||||
id identifies the GrCustomShader subclass. The remainder is based
|
||||
on the aspects of the GrCustomStage object's configuration that affect
|
||||
GLSL code generation. */
|
||||
virtual StageKey glStageKey(const GrCustomStage* stage) const SK_OVERRIDE {
|
||||
GrAssert(kIllegalStageClassID != fStageClassID);
|
||||
StageKey stageID = GLProgramStage::GenKey(stage);
|
||||
#if GR_DEBUG
|
||||
static const StageKey kIllegalIDMask =
|
||||
~((1 << kProgramStageKeyBits) - 1);
|
||||
GrAssert(!(kIllegalIDMask & stageID));
|
||||
#endif
|
||||
return fStageClassID | stageID;
|
||||
}
|
||||
|
||||
/** Returns a new instance of the appropriate *GL* implementation class
|
||||
for the given GrCustomStage; caller is responsible for deleting
|
||||
the object. */
|
||||
virtual GLProgramStage* createGLInstance(
|
||||
const GrCustomStage* stage) const SK_OVERRIDE {
|
||||
return new GLProgramStage(*this, stage);
|
||||
}
|
||||
|
||||
/** This class is a singleton. This function returns the single instance.
|
||||
*/
|
||||
static const GrProgramStageFactory& getInstance() {
|
||||
static SkAlignedSTStorage<1, GrTProgramStageFactory> gInstanceMem;
|
||||
static const GrTProgramStageFactory* gInstance;
|
||||
if (!gInstance) {
|
||||
gInstance = new (gInstanceMem.get()) GrTProgramStageFactory();
|
||||
}
|
||||
return *gInstance;
|
||||
}
|
||||
|
||||
protected:
|
||||
GrTProgramStageFactory() {
|
||||
fStageClassID = GenID() << kProgramStageKeyBits;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
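The key packing performed by GenID() and glStageKey() above can be summarized numerically (the values below are illustrative only):

    // Sketch only: with kProgramStageKeyBits == 10 and StageKey a uint16_t,
    // the low 10 bits identify the per-stage configuration and the high bits
    // identify the GrCustomStage subclass.
    GrProgramStageFactory::StageKey classID = 3 << 10;              // set in the ctor from GenID()
    GrProgramStageFactory::StageKey stageID = 0x017;                // from GLProgramStage::GenKey()
    GrProgramStageFactory::StageKey key     = classID | stageID;    // 0x0C17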
@ -112,6 +112,14 @@ public:
|
||||
*/
|
||||
const GrIRect& getResolveRect() const { return fResolveRect; }
|
||||
|
||||
/**
|
||||
* If the render target is multisampled this will perform a multisample
|
||||
* resolve. Any pending draws to the target are first flushed. This only
|
||||
* applies to render targets that are associated with GrTextures. After the
|
||||
* function returns the GrTexture will contain the resolved pixels.
|
||||
*/
|
||||
void resolve();
|
||||
|
||||
// GrResource overrides
|
||||
virtual size_t sizeInBytes() const;
|
||||
|
||||
|
@ -15,15 +15,11 @@
|
||||
class GrGpu;
|
||||
class GrContext;
|
||||
|
||||
/**
|
||||
* Base class for the GPU resources created by a GrContext.
|
||||
*/
|
||||
class GrResource : public GrRefCnt {
|
||||
public:
|
||||
explicit GrResource(GrGpu* gpu);
|
||||
|
||||
virtual ~GrResource() {
|
||||
// subclass should have released this.
|
||||
GrAssert(!isValid());
|
||||
}
|
||||
|
||||
/**
|
||||
* Frees the resource in the underlying 3D API. It must be safe to call this
|
||||
* when the resource has been previously abandoned.
|
||||
@ -59,26 +55,29 @@ public:
|
||||
/**
|
||||
* Retrieves the context that owns the resource. Note that it is possible
|
||||
* for this to return NULL. When resources have been release()ed or
|
||||
* abandon()ed they no longer have an unknowning context. Destroying a
|
||||
* abandon()ed they no longer have an owning context. Destroying a
|
||||
* GrContext automatically releases all its resources.
|
||||
*/
|
||||
const GrContext* getContext() const;
|
||||
GrContext* getContext();
|
||||
|
||||
protected:
|
||||
explicit GrResource(GrGpu* gpu);
|
||||
virtual ~GrResource();
|
||||
|
||||
GrGpu* getGpu() const { return fGpu; }
|
||||
|
||||
virtual void onRelease() = 0;
|
||||
virtual void onAbandon() = 0;
|
||||
|
||||
GrGpu* getGpu() const { return fGpu; }
|
||||
|
||||
private:
|
||||
GrResource(); // unimpl
|
||||
|
||||
GrGpu* fGpu; // not reffed. This can outlive the GrGpu.
|
||||
|
||||
friend class GrGpu; // GrGpu manages list of resources.
|
||||
|
||||
GrGpu* fGpu; // not reffed. The GrGpu can be deleted while there
|
||||
// are still live GrResources. It will call
|
||||
// release() on all such resources in its
|
||||
// destructor.
|
||||
GrResource* fNext; // dl-list of resources per-GrGpu
|
||||
GrResource* fPrevious;
|
||||
|
||||
|
@ -11,8 +11,9 @@
|
||||
#ifndef GrSamplerState_DEFINED
|
||||
#define GrSamplerState_DEFINED
|
||||
|
||||
#include "GrTypes.h"
|
||||
#include "GrCustomStage.h"
|
||||
#include "GrMatrix.h"
|
||||
#include "GrTypes.h"
|
||||
|
||||
#define MAX_KERNEL_WIDTH 25
|
||||
|
||||
@ -39,6 +40,14 @@ public:
|
||||
* Apply a separable convolution kernel.
|
||||
*/
|
||||
kConvolution_Filter,
|
||||
/**
|
||||
* Apply a dilate filter (max over a 1D radius).
|
||||
*/
|
||||
kDilate_Filter,
|
||||
/**
|
||||
* Apply an erode filter (min over a 1D radius).
|
||||
*/
|
||||
kErode_Filter,
|
||||
|
||||
kDefault_Filter = kNearest_Filter
|
||||
};
|
||||
@ -86,6 +95,17 @@ public:
|
||||
kDefault_WrapMode = kClamp_WrapMode
|
||||
};
|
||||
|
||||
/**
|
||||
* For the filters which perform more than one texture sample (convolution,
|
||||
* erode, dilate), this determines the direction in which the texture
|
||||
* coordinates will be incremented.
|
||||
*/
|
||||
enum FilterDirection {
|
||||
kX_FilterDirection,
|
||||
kY_FilterDirection,
|
||||
|
||||
kDefault_FilterDirection = kX_FilterDirection,
|
||||
};
|
||||
/**
|
||||
* Default sampler state is set to clamp, use normal sampling mode, be
|
||||
* unfiltered, and use identity matrix.
|
||||
@ -93,12 +113,40 @@ public:
|
||||
GrSamplerState()
|
||||
: fRadial2CenterX1()
|
||||
, fRadial2Radius0()
|
||||
, fRadial2PosRoot() {
|
||||
, fRadial2PosRoot()
|
||||
, fCustomStage (NULL) {
|
||||
this->reset();
|
||||
}
|
||||
|
||||
~GrSamplerState() {
|
||||
GrSafeUnref(fCustomStage);
|
||||
}
|
||||
|
||||
bool operator ==(const GrSamplerState& s) const {
|
||||
/* We must be bit-identical as far as the CustomStage;
|
||||
there may be multiple CustomStages that will produce
|
||||
the same shader code and so are equivalent.
|
||||
Can't take the address of fWrapX because it's :8 */
|
||||
int bitwiseRegion = (intptr_t) &fCustomStage - (intptr_t) this;
|
||||
GrAssert(sizeof(GrSamplerState) ==
|
||||
bitwiseRegion + sizeof(fCustomStage));
|
||||
return !memcmp(this, &s, bitwiseRegion) &&
|
||||
((fCustomStage == s.fCustomStage) ||
|
||||
(fCustomStage && s.fCustomStage &&
|
||||
(fCustomStage->getFactory() ==
|
||||
s.fCustomStage->getFactory()) &&
|
||||
fCustomStage->isEqual(s.fCustomStage)));
|
||||
}
|
||||
bool operator !=(const GrSamplerState& s) const { return !(*this == s); }
|
||||
|
||||
GrSamplerState& operator =(const GrSamplerState s) {
|
||||
memcpy(this, &s, sizeof(GrSamplerState));
|
||||
return *this;
|
||||
}
|
||||
|
||||
WrapMode getWrapX() const { return fWrapX; }
|
||||
WrapMode getWrapY() const { return fWrapY; }
|
||||
FilterDirection getFilterDirection() const { return fFilterDirection; }
|
||||
SampleMode getSampleMode() const { return fSampleMode; }
|
||||
const GrMatrix& getMatrix() const { return fMatrix; }
|
||||
const GrRect& getTextureDomain() const { return fTextureDomain; }
|
||||
@ -106,7 +154,6 @@ public:
|
||||
Filter getFilter() const { return fFilter; }
|
||||
int getKernelWidth() const { return fKernelWidth; }
|
||||
const float* getKernel() const { return fKernel; }
|
||||
const float* getImageIncrement() const { return fImageIncrement; }
|
||||
bool swapsRAndB() const { return fSwapRAndB; }
|
||||
|
||||
bool isGradient() const {
|
||||
@ -118,6 +165,7 @@ public:
|
||||
void setWrapX(WrapMode mode) { fWrapX = mode; }
|
||||
void setWrapY(WrapMode mode) { fWrapY = mode; }
|
||||
void setSampleMode(SampleMode mode) { fSampleMode = mode; }
|
||||
void setFilterDirection(FilterDirection mode) { fFilterDirection = mode; }
|
||||
|
||||
/**
|
||||
* Access the sampler's matrix. See SampleMode for explanation of
|
||||
@ -158,24 +206,30 @@ public:
|
||||
|
||||
void reset(WrapMode wrapXAndY,
|
||||
Filter filter,
|
||||
FilterDirection direction,
|
||||
const GrMatrix& matrix) {
|
||||
fWrapX = wrapXAndY;
|
||||
fWrapY = wrapXAndY;
|
||||
fSampleMode = kDefault_SampleMode;
|
||||
fFilter = filter;
|
||||
fFilterDirection = direction;
|
||||
fMatrix = matrix;
|
||||
fTextureDomain.setEmpty();
|
||||
fSwapRAndB = false;
|
||||
GrSafeSetNull(fCustomStage);
|
||||
}
|
||||
void reset(WrapMode wrapXAndY, Filter filter, const GrMatrix& matrix) {
|
||||
this->reset(wrapXAndY, filter, kDefault_FilterDirection, matrix);
|
||||
}
|
||||
void reset(WrapMode wrapXAndY,
|
||||
Filter filter) {
|
||||
this->reset(wrapXAndY, filter, GrMatrix::I());
|
||||
this->reset(wrapXAndY, filter, kDefault_FilterDirection, GrMatrix::I());
|
||||
}
|
||||
void reset(const GrMatrix& matrix) {
|
||||
this->reset(kDefault_WrapMode, kDefault_Filter, matrix);
|
||||
this->reset(kDefault_WrapMode, kDefault_Filter, kDefault_FilterDirection, matrix);
|
||||
}
|
||||
void reset() {
|
||||
this->reset(kDefault_WrapMode, kDefault_Filter, GrMatrix::I());
|
||||
this->reset(kDefault_WrapMode, kDefault_Filter, kDefault_FilterDirection, GrMatrix::I());
|
||||
}
|
||||
|
||||
GrScalar getRadial2CenterX1() const { return fRadial2CenterX1; }
|
||||
@ -198,37 +252,44 @@ public:
|
||||
fRadial2PosRoot = posRoot;
|
||||
}
|
||||
|
||||
void setConvolutionParams(int kernelWidth, const float* kernel, float imageIncrement[2]) {
|
||||
void setConvolutionParams(int kernelWidth, const float* kernel) {
|
||||
GrAssert(kernelWidth >= 0 && kernelWidth <= MAX_KERNEL_WIDTH);
|
||||
fKernelWidth = kernelWidth;
|
||||
if (NULL != kernel) {
|
||||
memcpy(fKernel, kernel, kernelWidth * sizeof(float));
|
||||
}
|
||||
if (NULL != imageIncrement) {
|
||||
memcpy(fImageIncrement, imageIncrement, sizeof(fImageIncrement));
|
||||
} else {
|
||||
memset(fImageIncrement, 0, sizeof(fImageIncrement));
|
||||
}
|
||||
}
|
||||
|
||||
void setMorphologyRadius(int radius) {
|
||||
GrAssert(radius >= 0 && radius <= MAX_KERNEL_WIDTH);
|
||||
fKernelWidth = radius;
|
||||
}
|
||||
|
||||
void setCustomStage(GrCustomStage* stage) {
|
||||
GrSafeAssign(fCustomStage, stage);
|
||||
}
|
||||
GrCustomStage* getCustomStage() const { return fCustomStage; }
|
||||
|
||||
private:
|
||||
WrapMode fWrapX : 8;
|
||||
WrapMode fWrapY : 8;
|
||||
SampleMode fSampleMode : 8;
|
||||
Filter fFilter : 8;
|
||||
GrMatrix fMatrix;
|
||||
bool fSwapRAndB;
|
||||
GrRect fTextureDomain;
|
||||
WrapMode fWrapX : 8;
|
||||
WrapMode fWrapY : 8;
|
||||
FilterDirection fFilterDirection : 8;
|
||||
SampleMode fSampleMode : 8;
|
||||
Filter fFilter : 8;
|
||||
GrMatrix fMatrix;
|
||||
bool fSwapRAndB;
|
||||
GrRect fTextureDomain;
|
||||
|
||||
// these are undefined unless fSampleMode == kRadial2_SampleMode
|
||||
GrScalar fRadial2CenterX1;
|
||||
GrScalar fRadial2Radius0;
|
||||
SkBool8 fRadial2PosRoot;
|
||||
GrScalar fRadial2CenterX1;
|
||||
GrScalar fRadial2Radius0;
|
||||
SkBool8 fRadial2PosRoot;
|
||||
|
||||
// These are undefined unless fFilter == kConvolution_Filter
|
||||
uint8_t fKernelWidth;
|
||||
float fImageIncrement[2];
|
||||
float fKernel[MAX_KERNEL_WIDTH];
|
||||
uint8_t fKernelWidth;
|
||||
float fKernel[MAX_KERNEL_WIDTH];
|
||||
|
||||
GrCustomStage* fCustomStage;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
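A small sketch of attaching a custom stage to a sampler, per the setCustomStage() accessor above ('myStage' is an assumed GrCustomStage*):

    // Sketch only: reset() clears any previous stage, setCustomStage() refs the new one.
    GrSamplerState sampler;
    sampler.reset();
    sampler.setCustomStage(myStage);   // GrSafeAssign takes a reference for the sampler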
@ -12,57 +12,104 @@
|
||||
#define GrTextContext_DEFINED
|
||||
|
||||
#include "GrGlyph.h"
|
||||
#include "GrPaint.h"
|
||||
#include "GrMatrix.h"
|
||||
#include "GrRefCnt.h"
|
||||
|
||||
struct GrGpuTextVertex;
|
||||
class GrContext;
|
||||
class GrTextStrike;
|
||||
class GrFontScaler;
|
||||
class GrDrawTarget;
|
||||
class GrPaint;
|
||||
|
||||
class GrTextContext {
|
||||
public:
|
||||
GrTextContext(GrContext*,
|
||||
const GrPaint& paint,
|
||||
const GrMatrix* extMatrix = NULL);
|
||||
~GrTextContext();
|
||||
class SkGpuDevice;
|
||||
class SkPaint;
|
||||
|
||||
void drawPackedGlyph(GrGlyph::PackedID, GrFixed left, GrFixed top,
|
||||
GrFontScaler*);
|
||||
|
||||
void flush(); // optional; automatically called by destructor
|
||||
|
||||
private:
|
||||
GrPaint fPaint;
|
||||
GrVertexLayout fVertexLayout;
|
||||
/**
|
||||
* Derived classes can use stages GrPaint::kTotalStages through
|
||||
* GrDrawState::kNumStages-1. The stages before GrPaint::kTotalStages
|
||||
* are reserved for setting up the draw (i.e., textures and filter masks).
|
||||
*/
|
||||
class GrTextContext: public GrRefCnt {
|
||||
protected:
|
||||
GrContext* fContext;
|
||||
GrDrawTarget* fDrawTarget;
|
||||
|
||||
GrMatrix fExtMatrix;
|
||||
GrFontScaler* fScaler;
|
||||
GrTextStrike* fStrike;
|
||||
public:
|
||||
/**
|
||||
* To use a text context it must be wrapped in an AutoFinish. AutoFinish's
|
||||
* destructor ensures all drawing is flushed to the GrContext.
|
||||
*/
|
||||
class AutoFinish {
|
||||
public:
|
||||
AutoFinish(GrTextContext* textContext, GrContext* context,
|
||||
const GrPaint&, const GrMatrix* extMatrix);
|
||||
~AutoFinish();
|
||||
GrTextContext* getTextContext() const;
|
||||
|
||||
inline void flushGlyphs();
|
||||
void setupDrawTarget();
|
||||
|
||||
enum {
|
||||
kMinRequestedGlyphs = 1,
|
||||
kDefaultRequestedGlyphs = 64,
|
||||
kMinRequestedVerts = kMinRequestedGlyphs * 4,
|
||||
kDefaultRequestedVerts = kDefaultRequestedGlyphs * 4,
|
||||
private:
|
||||
GrTextContext* fTextContext;
|
||||
};
|
||||
|
||||
GrGpuTextVertex* fVertices;
|
||||
virtual void drawPackedGlyph(GrGlyph::PackedID, GrFixed left, GrFixed top,
|
||||
GrFontScaler*) = 0;
|
||||
|
||||
int32_t fMaxVertices;
|
||||
GrTexture* fCurrTexture;
|
||||
int fCurrVertex;
|
||||
virtual ~GrTextContext() {}
|
||||
|
||||
GrIRect fClipRect;
|
||||
GrMatrix fOrigViewMatrix; // restore previous viewmatrix
|
||||
protected:
|
||||
GrTextContext() {
|
||||
fContext = NULL;
|
||||
}
|
||||
|
||||
bool isValid() const {
|
||||
return (NULL != fContext);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the object.
|
||||
*
|
||||
* Before call to this method, the instance is considered to be in
|
||||
* invalid state. I.e. call to any method other than isValid will result in
|
||||
* undefined behaviour.
|
||||
*
|
||||
* @see finish
|
||||
*/
|
||||
virtual void init(GrContext* context, const GrPaint&,
|
||||
const GrMatrix* extMatrix) {
|
||||
fContext = context;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the object to invalid state.
|
||||
*
|
||||
* After call to this method, the instance is considered to be in
|
||||
* invalid state.
|
||||
*
|
||||
* It might be brought back to a valid state by calling init.
|
||||
*
|
||||
* @see init
|
||||
*/
|
||||
virtual void finish() {
|
||||
fContext = NULL;
|
||||
}
|
||||
|
||||
private:
|
||||
typedef GrRefCnt INHERITED;
|
||||
};
|
||||
|
||||
inline GrTextContext::AutoFinish::AutoFinish(GrTextContext* textContext,
|
||||
GrContext* context,
|
||||
const GrPaint& grPaint,
|
||||
const GrMatrix* extMatrix) {
|
||||
GrAssert(NULL != textContext);
|
||||
fTextContext = textContext;
|
||||
fTextContext->ref();
|
||||
fTextContext->init(context, grPaint, extMatrix);
|
||||
}
|
||||
|
||||
inline GrTextContext::AutoFinish::~AutoFinish() {
|
||||
fTextContext->finish();
|
||||
fTextContext->unref();
|
||||
}
|
||||
|
||||
inline GrTextContext* GrTextContext::AutoFinish::getTextContext() const {
|
||||
return fTextContext;
|
||||
}
|
||||
|
||||
#endif
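
A hedged sketch of how a caller might use the AutoFinish wrapper declared above;
the function name and the elided per-glyph arguments are assumptions, not part
of this change:

void drawSomeGlyphs(GrTextContext* textContext, GrContext* context,
                    const GrPaint& grPaint, const GrMatrix* extMatrix) {
    GrTextContext::AutoFinish af(textContext, context, grPaint, extMatrix);
    // af.getTextContext()->drawPackedGlyph(packedID, left, top, fontScaler);
    // ... one call per glyph ...
}   // ~AutoFinish() calls finish() and unref(), ensuring buffered draws are flushed
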
@ -79,7 +79,7 @@ public:
* @param height height of rectangle to write in pixels.
* @param config the pixel config of the source buffer
* @param buffer memory to read pixels from
* @param rowBytes number of bytes bewtween consecutive rows. Zero
* @param rowBytes number of bytes between consecutive rows. Zero
* means rows are tightly packed.
*/
void writePixels(int left, int top, int width, int height,
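
A minimal sketch of the rowBytes convention documented above; the receiver,
buffer, and 32-bit config are assumptions, only the parameter meanings come
from this excerpt. For a tightly packed block of 32-bit pixels, passing 0 for
rowBytes is equivalent to passing width * 4:

// someTexture->writePixels(0, 0, width, height, kSkia8888_PM_GrPixelConfig,
//                          buffer, 0 /* 0 == tightly packed, i.e. width * 4 */);
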
@ -94,6 +94,7 @@ public:
* render target
*/
GrRenderTarget* asRenderTarget() { return fRenderTarget; }
const GrRenderTarget* asRenderTarget() const { return fRenderTarget; }

/**
* Removes the reference on the associated GrRenderTarget held by this

@ -225,6 +225,8 @@ static inline bool GrIsPrimTypeTris(GrPrimitiveType type) {
* Coefficients for alpha-blending.
*/
enum GrBlendCoeff {
kInvalid_BlendCoeff = -1,

kZero_BlendCoeff, //<! 0
kOne_BlendCoeff, //<! 1
kSC_BlendCoeff, //<! src color
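
For reference, these coefficients plug into the usual fixed-function blend
equation; this is a sketch, and the coefficients beyond those shown in this
excerpt are omitted:

// blendedColor = srcCoeff * srcColor + dstCoeff * dstColor
// kZero_BlendCoeff contributes nothing to its term, kOne_BlendCoeff passes the
// color through unscaled, and kSC_BlendCoeff scales the term by the src color.
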
@ -301,6 +303,8 @@ enum GrPixelConfig {
* Unpremultiplied. Byte order is b,g,r,a
*/
kBGRA_8888_UPM_GrPixelConfig,

kGrPixelConfigCount
};

// Aliases for pixel configs that match skia's byte order
@ -319,11 +323,6 @@ enum GrPixelConfig {
#error "SK_*32_SHIFT values must correspond to GL_BGRA or GL_RGBA format."
#endif

// WebKit is relying on this old name for the native skia PM config. This will
// be deleted ASAP because it is so similar to kRGBA_PM_8888_GrPixelConfig but
// has a different interpretation when skia is compiled BGRA.
static const GrPixelConfig kRGBA_8888_GrPixelConfig = kSkia8888_PM_GrPixelConfig;

// Returns true if the pixel config has 8bit r,g,b,a components in that byte
// order
static inline bool GrPixelConfigIsRGBA8888(GrPixelConfig config) {
@ -428,20 +427,6 @@ static inline bool GrPixelConfigIsAlphaOnly(GrPixelConfig config) {
}
}

/**
* Used to control the level of antialiasing available for a rendertarget.
* Anti-alias quality levels depend on the underlying API/GPU capabilities.
*/
enum GrAALevels {
kNone_GrAALevel, //<! No antialiasing available.
kLow_GrAALevel, //<! Low quality antialiased rendering. Actual
// interpretation is platform-dependent.
kMed_GrAALevel, //<! Medium quality antialiased rendering. Actual
// interpretation is platform-dependent.
kHigh_GrAALevel, //<! High quality antialiased rendering. Actual
// interpretation is platform-dependent.
};

/**
* Optional bitfield flags that can be passed to createTexture.
*/
@ -479,30 +464,23 @@ enum {
*/
struct GrTextureDesc {
GrTextureFlags fFlags; //!< bitfield of TextureFlags
/**
* The level of antialiasing available for a rendertarget texture. Only used
* if fFlags contains kRenderTarget_GrTextureFlag.
*/
GrAALevels fAALevel;
int fWidth; //!< Width of the texture
int fHeight; //!< Height of the texture

/**
* Format of source data of the texture. Not guaranteed to be the same as
* internal format used by 3D API.
*/
GrPixelConfig fConfig;
};

/**
* Set Operations used to construct clips.
*/
enum GrSetOp {
kReplace_SetOp,
kIntersect_SetOp,
kUnion_SetOp,
kXor_SetOp,
kDifference_SetOp,
kReverseDifference_SetOp,
/**
* The number of samples per pixel or 0 to disable full scene AA. This only
* applies if the kRenderTarget_GrTextureFlagBit is set. The actual number
* of samples may not exactly match the request. The request will be rounded
* up to the next supported sample count, or down if it is larger than the
* max supported count.
*/
int fSampleCnt;
};
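
A hedged sketch of filling in the descriptor above for a multisampled
render-target texture; the flag name kRenderTarget_GrTextureFlagBit and the
rounding behaviour come from the comments in this excerpt, while the concrete
values are assumptions:

GrTextureDesc desc;
desc.fFlags     = kRenderTarget_GrTextureFlagBit;
desc.fWidth     = 256;
desc.fHeight    = 256;
desc.fConfig    = kSkia8888_PM_GrPixelConfig;
desc.fSampleCnt = 4;    // request 4x MSAA; may be rounded, 0 disables full scene AA
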

/**
@ -593,24 +571,6 @@ static inline bool GrIsFillInverted(GrPathFill fill) {
return gIsFillInverted[fill];
}

/**
* Hints provided about a path's convexity (or lack thereof).
*/
enum GrConvexHint {
kNone_ConvexHint, //<! No hint about convexity
// of the path
kConvex_ConvexHint, //<! Path is one convex piece
kNonOverlappingConvexPieces_ConvexHint, //<! Multiple convex pieces,
// pieces are known to be
// disjoint
kSameWindingConvexPieces_ConvexHint, //<! Multiple convex pieces,
// may or may not intersect,
// either all wind cw or all
// wind ccw.
kConcave_ConvexHint //<! Path is known to be
// concave
};

///////////////////////////////////////////////////////////////////////////////

// opaque type for 3D API object handles
@ -702,133 +662,6 @@ struct GrPlatformRenderTargetDesc {
GrPlatform3DObject fRenderTargetHandle;
};

///////////////////////////////////////////////////////////////////////////////
// DEPRECATED. createPlatformSurface is replaced by createPlatformTexture
// and createPlatformRenderTarget. These enums and structs will be removed.

enum GrPlatformSurfaceType {
/**
* Specifies that the object being created is a render target.
*/
kRenderTarget_GrPlatformSurfaceType,
/**
* Specifies that the object being created is a texture.
*/
kTexture_GrPlatformSurfaceType,
/**
* Specifies that the object being created is a texture and a render
* target.
*/
kTextureRenderTarget_GrPlatformSurfaceType,
};

enum GrPlatformRenderTargetFlags {
kNone_GrPlatformRenderTargetFlagBit = 0x0,

/**
* Gives permission to Gr to perform the downsample-resolve of a
* multisampled render target. If this is not set then read pixel
* operations may fail. If the object is both a texture and render target
* then this *must* be set. Otherwise, if the client wants to do its own
* resolves it must create separate GrRenderTarget and GrTexture objects
* and insert appropriate flushes and resolves between data hazards.
* GrRenderTarget has a flagForResolve()
*/
kGrCanResolve_GrPlatformRenderTargetFlagBit = 0x2,
};

GR_MAKE_BITFIELD_OPS(GrPlatformRenderTargetFlags)

struct GrPlatformSurfaceDesc {
GrPlatformSurfaceType fSurfaceType; // type of surface to create
/**
* Flags for kRenderTarget and kTextureRenderTarget surface types
*/
GrPlatformRenderTargetFlags fRenderTargetFlags;

int fWidth; // width in pixels
int fHeight; // height in pixels
GrPixelConfig fConfig; // color format
/**
* Number of stencil bits per sample. Only relevant if kIsRenderTarget is
* set in fFlags.
*/
int fStencilBits;

/**
* Number of samples per-pixel. Only relevant if kIsRenderTarget is set in
* fFlags.
*/
int fSampleCnt;

/**
* Texture object in 3D API. Only relevant if fSurfaceType is kTexture or
* kTextureRenderTarget.
* GL: this is a texture object (glGenTextures)
*/
GrPlatform3DObject fPlatformTexture;
/**
* Render target object in 3D API. Only relevant if fSurfaceType is
* kRenderTarget or kTextureRenderTarget
* GL: this is a FBO object (glGenFramebuffers)
*/
GrPlatform3DObject fPlatformRenderTarget;
/**
* 3D API object used as destination of resolve. Only relevant if
* fSurfaceType is kRenderTarget or kTextureRenderTarget and
* kGrCanResolve is set in fRenderTargetFlags.
* fFlags.
* GL: this is a FBO object (glGenFramebuffers)
*/
GrPlatform3DObject fPlatformResolveDestination;

void reset() { memset(this, 0, sizeof(GrPlatformSurfaceDesc)); }
};

/**
* Example of how to wrap render-to-texture-with-MSAA GL objects with a GrPlatformSurface
*
* GLint colorBufferID;
* glGenRenderbuffers(1, &colorBufferID);
* glBindRenderbuffer(GL_RENDERBUFFER, colorBufferID);
* glRenderbufferStorageMultisample(GL_RENDERBUFFER, S, GL_RGBA, W, H);
*
* GLint stencilBufferID;
* glGenRenderbuffers(1, &stencilBufferID);
* glBindRenderbuffer(GL_RENDERBUFFER, stencilBufferID);
* glRenderbufferStorageMultisample(GL_RENDERBUFFER, S, GL_STENCIL_INDEX8, W, H);
*
* GLint drawFBOID;
* glGenFramebuffers(1, &drawFBOID);
* glBindFramebuffer(GL_FRAMEBUFFER, drawFBOID);
* glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorBufferID);
* glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, stencilBufferID);
*
* GLint textureID;
* glGenTextures(1, &textureID);
* glBindTexture(GL_TEXTURE_2D, textureID);
* glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, W, H, ...);
*
* GLint readFBOID;
* glGenFramebuffers(1, &readFBOID);
* glBindFramebuffer(GL_FRAMEBUFFER, readFBOID);
* glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, textureID, 0);
*
* GrPlatformSurfaceDesc renderTargetTextureDesc;
* renderTargetTextureDesc.fSurfaceType = kTextureRenderTarget_GrPlatformSurfaceType;
* renderTargetTextureDesc.fRenderTargetFlags = kGrCanResolve_GrPlatformRenderTargetFlagBit;
* renderTargetTextureDesc.fWidth = W;
* renderTargetTextureDesc.fHeight = H;
* renderTargetTextureDesc.fConfig = kSkia8888_PM_GrPixelConfig;
* renderTargetTextureDesc.fStencilBits = 8;
* renderTargetTextureDesc.fSampleCnt = S;
* renderTargetTextureDesc.fPlatformTexture = textureID;
* renderTargetTextureDesc.fPlatformRenderTarget = drawFBOID;
* renderTargetTextureDesc.fPlatformResolveDestination = readFBOID;
*
* GrTexture* texture = static_cast<GrTexture*>(grContext->createPlatformSurface(renderTargetTextureDesc));
*/


///////////////////////////////////////////////////////////////////////////////

@ -29,14 +29,11 @@ class SK_API SkGpuDevice : public SkDevice {
public:
/**
* New device that will create an offscreen renderTarget based on the
* config, width, height.
*
* usage is a special flag that should only be set by SkCanvas
* internally.
* config, width, height. The device's storage will not count against
* the GrContext's texture cache budget. The device's pixels will be
* uninitialized.
*/
SkGpuDevice(GrContext*, SkBitmap::Config,
int width, int height,
SkDevice::Usage usage = SkDevice::kGeneral_Usage);
SkGpuDevice(GrContext*, SkBitmap::Config, int width, int height);
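
A hedged sketch of constructing the offscreen device declared above and drawing
into it through a canvas; the dimensions, config, and SkCanvas usage are
assumptions for illustration, not part of this change:

SkGpuDevice device(grContext, SkBitmap::kARGB_8888_Config, 640, 480);
SkCanvas canvas(&device);
canvas.drawColor(SK_ColorWHITE);    // pixels start uninitialized, so clear first
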

/**
* New device that will render to the specified renderTarget.
@ -102,7 +99,7 @@ public:
const SkPaint&) SK_OVERRIDE;
virtual bool filterTextFlags(const SkPaint&, TextFlags*) SK_OVERRIDE;

virtual void flush();
virtual void flush();

/**
* Makes this device's rendertarget current in the underlying 3D API.
@ -110,47 +107,25 @@ public:
*/
virtual void makeRenderTargetCurrent();

virtual bool filterImage(SkImageFilter*, const SkBitmap& src,
const SkMatrix& ctm,
SkBitmap* result, SkIPoint* offset) SK_OVERRIDE;

virtual bool canHandleImageFilter(SkImageFilter*) SK_OVERRIDE;
virtual bool filterImage(SkImageFilter*, const SkBitmap&, const SkMatrix&,
SkBitmap*, SkIPoint*) SK_OVERRIDE;

class SkAutoCachedTexture; // used internally

protected:
typedef GrContext::TextureCacheEntry TexCache;
enum TexType {
kBitmap_TexType,
kDeviceRenderTarget_TexType,
kSaveLayerDeviceRenderTarget_TexType
};
TexCache lockCachedTexture(const SkBitmap& bitmap,
const GrSamplerState* sampler,
TexType type = kBitmap_TexType);
const GrSamplerState* sampler);
bool isBitmapInTextureCache(const SkBitmap& bitmap,
const GrSamplerState& sampler) const;
void unlockCachedTexture(TexCache);

class SkAutoCachedTexture {
public:
SkAutoCachedTexture();
SkAutoCachedTexture(SkGpuDevice* device,
const SkBitmap& bitmap,
const GrSamplerState* sampler,
GrTexture** texture);
~SkAutoCachedTexture();

GrTexture* set(SkGpuDevice*, const SkBitmap&, const GrSamplerState*);

private:
SkGpuDevice* fDevice;
TexCache fTex;
};
friend class SkAutoTexCache;

// overrides from SkDevice
virtual bool onReadPixels(const SkBitmap& bitmap,
int x, int y,
SkCanvas::Config8888 config8888) SK_OVERRIDE;


private:
GrContext* fContext;

@ -163,38 +138,26 @@ private:
bool fNeedClear;
bool fNeedPrepareRenderTarget;

GrTextContext* fTextContext;

// called from rt and tex cons
void initFromRenderTarget(GrContext*, GrRenderTarget*);

// doesn't set the texture/sampler/matrix state
// caller needs to null out GrPaint's texture if
// non-textured drawing is desired.
// Set constantColor to true if a constant color
// will be used. This is an optimization, and can
// always be set to false. constantColor should
// never be true if justAlpha is true.
bool skPaint2GrPaintNoShader(const SkPaint& skPaint,
bool justAlpha,
GrPaint* grPaint,
bool constantColor);
// used by createCompatibleDevice
SkGpuDevice(GrContext*, GrTexture* texture, TexCache, bool needClear);

// uses the SkShader to setup paint, act used to
// hold lock on cached texture and free it when
// destroyed.
// If there is no shader, constantColor will
// be passed to skPaint2GrPaintNoShader. Otherwise
// it is ignored.
bool skPaint2GrPaintShader(const SkPaint& skPaint,
SkAutoCachedTexture* act,
const SkMatrix& ctm,
GrPaint* grPaint,
bool constantColor);
// overrides from SkDevice
virtual void postSave() SK_OVERRIDE {
fContext->postClipPush();
}
virtual void preRestore() SK_OVERRIDE {
fContext->preClipPop();
}

// override from SkDevice
virtual SkDevice* onCreateCompatibleDevice(SkBitmap::Config config,
int width, int height,
virtual SkDevice* onCreateCompatibleDevice(SkBitmap::Config config,
int width, int height,
bool isOpaque,
Usage usage);
Usage usage) SK_OVERRIDE;

SkDrawProcs* initDrawForText(GrTextContext*);
bool bindDeviceAsTexture(GrPaint* paint);
@ -207,6 +170,11 @@ private:
void internalDrawBitmap(const SkDraw&, const SkBitmap&,
const SkIRect&, const SkMatrix&, GrPaint* grPaint);

/**
* Returns a non-initialized instance.
*/
GrTextContext* getTextContext();

typedef SkDevice INHERITED;
};