Using ScaleGestureDetector: when is onScaleBegin triggered?

1. The most basic single-finger drag and two-finger zoom
How it works: for ordinary touch handling on a view, register an OnTouchListener via setOnTouchListener() and process the touch events in its onTouch() callback.
Code listing:
package com.example.myapp; // package name truncated in the original; "myapp" is a placeholder

import android.app.Activity;
import android.graphics.Matrix;
import android.graphics.PointF;
import android.os.Bundle;
import android.view.Menu;
import android.view.MotionEvent;
import android.view.View;
import android.view.View.OnTouchListener;
import android.widget.ImageView;

public class MainActivity extends Activity implements OnTouchListener {

    public ImageView myImageView;

    // touch modes
    private static final int NONE = 0;
    private static final int DRAG = 1;
    private static final int ZOOM = 2;
    private int mode = NONE;

    private Matrix tmpMatrix = new Matrix();
    private Matrix savedMatrix = new Matrix();
    private PointF startPoint = new PointF();
    private PointF midPoint = new PointF();
    private float oldDistance;

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        myImageView = (ImageView) findViewById(R.id.myImageView);
        myImageView.setOnTouchListener(this);
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.activity_main, menu);
        return true;
    }

    @Override
    public boolean onTouch(View v, MotionEvent event) {
        switch (event.getAction() & MotionEvent.ACTION_MASK) {
        case MotionEvent.ACTION_DOWN:
            // first finger down: remember the start point and enter drag mode
            startPoint.set(event.getX(), event.getY());
            tmpMatrix.set(myImageView.getImageMatrix());
            savedMatrix.set(tmpMatrix);
            mode = DRAG;
            break;
        case MotionEvent.ACTION_POINTER_DOWN:
            // second finger down: remember the initial span and the midpoint, enter zoom mode
            oldDistance = (float) Math.sqrt(
                    (event.getX(0) - event.getX(1)) * (event.getX(0) - event.getX(1))
                    + (event.getY(0) - event.getY(1)) * (event.getY(0) - event.getY(1)));
            if (oldDistance > 10f) {
                savedMatrix.set(tmpMatrix);
                midPoint.set((event.getX(0) + event.getX(1)) / 2,
                        (event.getY(0) + event.getY(1)) / 2);
                mode = ZOOM;
            }
            break;
        case MotionEvent.ACTION_MOVE:
            if (mode == DRAG) {
                // drag: translate by how far the finger has moved since ACTION_DOWN
                tmpMatrix.set(savedMatrix);
                tmpMatrix.postTranslate(event.getX() - startPoint.x,
                        event.getY() - startPoint.y);
            } else if (mode == ZOOM) {
                // zoom: scale by the ratio of the current span to the initial span
                float newDist = (float) Math.sqrt(
                        (event.getX(0) - event.getX(1)) * (event.getX(0) - event.getX(1))
                        + (event.getY(0) - event.getY(1)) * (event.getY(0) - event.getY(1)));
                if (newDist > 10f) {
                    tmpMatrix.set(savedMatrix);
                    float scale = newDist / oldDistance;
                    tmpMatrix.postScale(scale, scale, midPoint.x, midPoint.y);
                }
            }
            break;
        case MotionEvent.ACTION_UP:
        case MotionEvent.ACTION_POINTER_UP:
            mode = NONE;
            break;
        }
        myImageView.setImageMatrix(tmpMatrix);
        return true;
    }
}
Code explanation: MainActivity implements the OnTouchListener interface, sets the ImageView's touch listener to this, and handles the touch events in the overridden onTouch() method.
Both the position and the size of the image are changed through matrix operations here; if matrices are unfamiliar, it is worth brushing up on a little linear algebra first.
Dragging is implemented by recording in the matrix how far the finger has moved. For zooming, first record the initial distance between the two fingers; then, as the fingers move, compute their current distance in real time, divide it by the previous distance to get the scale factor, and store that in the matrix via postScale().
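A minimal sketch of that arithmetic, factored into helpers (the names spacing() and midPoint() are mine, not part of the listing above):

// Hypothetical helpers illustrating the drag/zoom math described above.
private float spacing(MotionEvent event) {
    // distance between the first two pointers
    float dx = event.getX(0) - event.getX(1);
    float dy = event.getY(0) - event.getY(1);
    return (float) Math.sqrt(dx * dx + dy * dy);
}

private void midPoint(PointF point, MotionEvent event) {
    // midpoint of the first two pointers, used as the pivot for postScale()
    point.set((event.getX(0) + event.getX(1)) / 2,
            (event.getY(0) + event.getY(1)) / 2);
}

// In ACTION_MOVE while zooming:
//   float scale = spacing(event) / oldDistance;   // current span / initial span
//   tmpMatrix.set(savedMatrix);
//   tmpMatrix.postScale(scale, scale, midPoint.x, midPoint.y);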
2. Gesture recognition
The example above implements the basic touch behaviour, and even old system versions support it well, but more advanced touch events such as double taps or long presses become very tedious to implement this way!
Fortunately, later versions of the API provide much nicer interfaces, so we can achieve the desired effect very easily.
Here we use the gesture recognition helpers Android provides, GestureDetector and ScaleGestureDetector, which require Android 2.2 or later. The example below implements: single-finger dragging, switching the ImageView's content with a fling, two-finger zooming, and changing the image's display state with a double tap.
At the end of each handler, setImageMatrix() is called to apply the translation or scale to the ImageView.
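One detail worth noting: setImageMatrix() only has a visible effect when the ImageView uses the matrix scale type, so the layout (or code) has to set it explicitly. A one-line version of that setup, assuming the same myImageView as in the listing, might be:

// setImageMatrix() is ignored unless the scale type is MATRIX
myImageView.setScaleType(ImageView.ScaleType.MATRIX);

The full listing: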
package com.example.myapp; // package name truncated in the original; "myapp" is a placeholder

import java.util.Random;

import android.app.Activity;
import android.graphics.Matrix;
import android.graphics.PointF;
import android.os.Bundle;
import android.view.GestureDetector;
import android.view.GestureDetector.SimpleOnGestureListener;
import android.view.Menu;
import android.view.MotionEvent;
import android.view.ScaleGestureDetector;
import android.view.ScaleGestureDetector.OnScaleGestureListener;
import android.view.View;
import android.widget.ImageView;
import android.widget.Toast;

public class MainActivity extends Activity {

    private GestureDetector myDetector;
    private ImageView myImageView;
    private ScaleGestureDetector mScaleGestureDetector;
    private Matrix matrix;  // initialized in onCreate()
    private Random random;  // picks a random picture on a right fling

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        myDetector = new GestureDetector(this, new MyGestureListener());
        mScaleGestureDetector = new ScaleGestureDetector(this, new MyScaleGestureListener());
        matrix = new Matrix();
        myImageView = (ImageView) findViewById(R.id.myImageView);
        random = new Random();
    }

    @Override
    public boolean onTouchEvent(MotionEvent event) {
        // one pointer: hand the event to the GestureDetector;
        // two or more pointers: hand it to the ScaleGestureDetector
        int pointCount = event.getPointerCount();
        if (pointCount == 1) {
            return myDetector.onTouchEvent(event);
        }
        return mScaleGestureDetector.onTouchEvent(event);
    }

    private class MyGestureListener extends SimpleOnGestureListener {

        Matrix mMatrix = new Matrix();
        PointF startPoint = new PointF();

        @Override
        public boolean onScroll(MotionEvent e1, MotionEvent e2,
                float distanceX, float distanceY) {
            // drag: shift the image by the scroll delta since the last event
            mMatrix.set(myImageView.getImageMatrix());
            System.out.println("distanceX:" + distanceX + " distanceY:" + distanceY);
            startPoint.set(e1.getRawX(), e1.getRawY());
            mMatrix.postTranslate(-distanceX, -distanceY);
            myImageView.setImageMatrix(mMatrix);
            return false;
        }

        @Override
        public boolean onFling(MotionEvent e1, MotionEvent e2,
                float velocityX, float velocityY) {
            final int FLING_MIN_DISTANCE = 100, FLING_MIN_VELOCITY = 200;
            if (e1.getX() - e2.getX() > FLING_MIN_DISTANCE
                    && Math.abs(velocityX) > FLING_MIN_VELOCITY) {
                // fling to the left: show a fixed picture
                myImageView.setImageResource(R.drawable.pic0);
                Toast.makeText(getApplicationContext(), "Fling Left", Toast.LENGTH_SHORT).show();
            } else if (e2.getX() - e1.getX() > FLING_MIN_DISTANCE
                    && Math.abs(velocityX) > FLING_MIN_VELOCITY) {
                // fling to the right: show a randomly chosen picture
                switch (random.nextInt(5)) {
                case 0:
                    myImageView.setImageResource(R.drawable.pic2);
                    break;
                case 1:
                    myImageView.setImageResource(R.drawable.pic3);
                    break;
                case 2:
                    myImageView.setImageResource(R.drawable.pic7);
                    break;
                case 3:
                    myImageView.setImageResource(R.drawable.pic5);
                    break;
                case 4:
                    myImageView.setImageResource(R.drawable.pic6);
                    break;
                }
                Toast.makeText(getApplicationContext(), "Fling Right", Toast.LENGTH_SHORT).show();
            }
            return false;
        }

        @Override
        public boolean onDown(MotionEvent arg0) {
            Toast.makeText(getApplicationContext(), "onDown", Toast.LENGTH_SHORT).show();
            return true;
        }

        @Override
        public boolean onDoubleTap(MotionEvent e) {
            // double tap toggles the image's visibility
            if (myImageView.isShown()) {
                myImageView.setVisibility(View.INVISIBLE);
            } else {
                myImageView.setVisibility(View.VISIBLE);
            }
            return false;
        }
    }

    private class MyScaleGestureListener implements OnScaleGestureListener {

        private float oldDist;
        private float newDist;
        Matrix mMatrix = new Matrix();

        @Override
        public boolean onScale(ScaleGestureDetector detector) {
            // incremental scale factor = current span / span at the previous callback
            newDist = detector.getCurrentSpan();
            mMatrix.set(myImageView.getImageMatrix());
            float scale = newDist / oldDist;
            System.out.println("scale:" + scale);
            mMatrix.postScale(scale, scale, detector.getFocusX(), detector.getFocusY());
            myImageView.setImageMatrix(mMatrix);
            oldDist = newDist;
            return false;
        }

        @Override
        public boolean onScaleBegin(ScaleGestureDetector detector) {
            // called once when the scale gesture is first recognized;
            // returning true is what allows onScale() to be delivered afterwards
            oldDist = detector.getCurrentSpan();
            newDist = detector.getCurrentSpan();
            return true;
        }

        @Override
        public void onScaleEnd(ScaleGestureDetector detector) {
        }
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.activity_main, menu);
        return true;
    }
}
Code explanation:
Here I define two gesture listeners: an OnScaleGestureListener dedicated to scaling, and a SimpleOnGestureListener for the ordinary gestures. When two pointers are touching, the events are routed to the former; the common gestures are handled by the latter.
The principle is much the same as in the first example, only implemented through different interfaces and callbacks, which makes the code both more convenient and clearer.
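As for the question in the title: onScaleBegin() fires once, when the ScaleGestureDetector first decides the touch is a scale gesture — in practice, once a second finger is down and the span between the fingers starts to change. It can only fire if the MotionEvents actually reach the detector through onTouchEvent(). Returning true from onScaleBegin() accepts the gesture; only then is onScale() called repeatedly, followed by onScaleEnd() when a finger lifts. A minimal sketch that just logs the callback order (the anonymous listener and the log tag are my own; it assumes an Activity context and android.util.Log):

// Minimal logging sketch of the ScaleGestureDetector callback sequence.
ScaleGestureDetector logDetector = new ScaleGestureDetector(this,
        new ScaleGestureDetector.SimpleOnScaleGestureListener() {
            @Override
            public boolean onScaleBegin(ScaleGestureDetector detector) {
                // fired once when the scale gesture is first recognized
                Log.d("ScaleDemo", "onScaleBegin, span=" + detector.getCurrentSpan());
                return true; // must return true, or onScale() is never delivered
            }

            @Override
            public boolean onScale(ScaleGestureDetector detector) {
                // fired repeatedly while the fingers keep moving
                Log.d("ScaleDemo", "onScale, factor=" + detector.getScaleFactor());
                return true; // true means this scale step has been consumed
            }

            @Override
            public void onScaleEnd(ScaleGestureDetector detector) {
                // fired when the gesture ends (a finger lifts)
                Log.d("ScaleDemo", "onScaleEnd");
            }
        });
// feed it from onTouchEvent(): logDetector.onTouchEvent(event);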
3. A few closing notes
When learning a particular Android class, the best approach is really to read the official API documentation. The internet is full of ready-made code, but when you actually use it all sorts of problems show up; many articles copy one another, and some even paste code that itself contains bugs. So it is best to study things carefully on your own.
For custom Views there are two gesture-recognition pitfalls that can waste a lot of your time (a sketch follows the list):
1. The View must have longClickable set to true; otherwise gesture recognition will not work correctly and only the Down, Show and Long gestures will be reported.
2. The gesture detector must be driven from the View's OnTouchListener; you cannot simply override onTouchEvent() the way you would in an Activity, otherwise gesture recognition again will not work correctly.
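To make those two points concrete, here is a minimal sketch of a custom view wired up that way (the class name MyGestureView and the log tag are mine, not from the article):

// Hypothetical custom view showing both points: longClickable + driving the
// detector from an OnTouchListener.
import android.content.Context;
import android.util.Log;
import android.view.GestureDetector;
import android.view.MotionEvent;
import android.view.View;

public class MyGestureView extends View {

    private final GestureDetector detector;

    public MyGestureView(Context context) {
        super(context);
        detector = new GestureDetector(context, new GestureDetector.SimpleOnGestureListener() {
            @Override
            public boolean onDoubleTap(MotionEvent e) {
                Log.d("MyGestureView", "double tap");
                return true;
            }
        });
        // point 1: without this, only Down/Show/Long gestures are reported
        setLongClickable(true);
        // point 2: feed events to the detector from an OnTouchListener
        setOnTouchListener(new View.OnTouchListener() {
            @Override
            public boolean onTouch(View v, MotionEvent event) {
                return detector.onTouchEvent(event);
            }
        });
    }
}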
Appendix: ClipZoomImageView.java — a related custom ImageView (from the workprojectdemo project, author zhy) that combines ScaleGestureDetector, GestureDetector and matrix transforms to implement pinch-zoom, drag and avatar clipping.
package com.zhy.view; // package name truncated in the original; "view" is a placeholder

import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Bitmap.Config;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.PorterDuff.Mode;
import android.graphics.PorterDuffXfermode;
import android.graphics.Rect;
import android.graphics.RectF;
import android.graphics.drawable.Drawable;
import android.util.DisplayMetrics;
import android.util.Log;
import android.view.GestureDetector;
import android.view.GestureDetector.SimpleOnGestureListener;
import android.view.MotionEvent;
import android.view.ScaleGestureDetector;
import android.view.ScaleGestureDetector.OnScaleGestureListener;
import android.view.View;
import android.view.View.OnTouchListener;
import android.view.ViewConfiguration;
import android.view.ViewTreeObserver;
import android.view.WindowManager;
import android.widget.ImageView;

/**
 * http://blog.csdn.net/lmj/article/details/
 * 
 * @author zhy
 */
public class ClipZoomImageView extends ImageView implements OnScaleGestureListener,
        OnTouchListener, ViewTreeObserver.OnGlobalLayoutListener {
    private static final String TAG = ClipZoomImageView.class.getSimpleName();

    public static float SCALE_MAX = 4.0f;
    private static float SCALE_MID = 1.0f;

    /** Initial scale; less than 1.0 if the image is wider or taller than the screen. */
    private float initScale = 1.0f;
    private boolean once = true;

    /** Holds the 9 values of the current matrix. */
    private final float[] matrixValues = new float[9];

    /** Gesture detector for scaling. */
    private ScaleGestureDetector mScaleGestureDetector = null;
    private final Matrix mScaleMatrix = new Matrix();

    /** Used for double-tap detection. */
    private GestureDetector mGestureDetector;
    private boolean isAutoScale;

    private int mTouchSlop;

    private float mLastX;
    private float mLastY;

    private boolean isCanDrag;
    private int lastPointerCount;

    private int screenWidth;
    private int screenHeight;

    private int type = 1;

    public ClipZoomImageView(Context context, int type, int mHorizontal, int mVertical) {
        super(context);
        this.mHorizontalPadding = mHorizontal;
        this.mVerticalPadding = mVertical;
        this.type = type;

        DisplayMetrics dm = new DisplayMetrics();
        WindowManager wm = (WindowManager) getContext().getSystemService(Context.WINDOW_SERVICE);
        wm.getDefaultDisplay().getMetrics(dm);
        screenWidth = dm.widthPixels;
        screenHeight = dm.heightPixels - 100;

        mTouchSlop = ViewConfiguration.get(context).getScaledTouchSlop();
        setScaleType(ScaleType.MATRIX);
        mGestureDetector = new GestureDetector(context, new SimpleOnGestureListener() {
            @Override
            public boolean onDoubleTap(MotionEvent e) {
                if (isAutoScale == true)
                    return true;
                float x = e.getX();
                float y = e.getY();
                if (getScale() < SCALE_MID) {
                    // below the intermediate scale: zoom in to SCALE_MID
                    ClipZoomImageView.this.postDelayed(new AutoScaleRunnable(SCALE_MID, x, y), 16);
                    isAutoScale = true;
                } else {
                    // otherwise zoom back out to the initial scale
                    ClipZoomImageView.this.postDelayed(new AutoScaleRunnable(initScale, x, y), 16);
                    isAutoScale = true;
                }
                return true;
            }
        });
        mScaleGestureDetector = new ScaleGestureDetector(context, this);
        this.setOnTouchListener(this);
    }
    /**
     * Runnable that performs the double-tap auto-zoom animation.
     * 
     * @author zhy
     */
    private class AutoScaleRunnable implements Runnable {
        static final float BIGGER = 1.07f;
        static final float SMALLER = 0.73f;
        private float mTargetScale;
        private float tmpScale;
        /** Center of the scaling. */
        private float x;
        private float y;

        /**
         * Pass in the target scale; comparing it with the current scale decides
         * whether each step zooms in or out.
         * 
         * @param targetScale
         */
        public AutoScaleRunnable(float targetScale, float x, float y) {
            this.mTargetScale = targetScale;
            this.x = x;
            this.y = y;
            if (getScale() < mTargetScale) {
                tmpScale = BIGGER;
            } else {
                tmpScale = SMALLER;
            }
        }

        @Override
        public void run() {
            // apply one animation step
            mScaleMatrix.postScale(tmpScale, tmpScale, x, y);
            checkBorder();
            setImageMatrix(mScaleMatrix);

            final float currentScale = getScale();
            // keep stepping while the scale is still on the near side of the target
            if (((tmpScale > 1f) && (currentScale < mTargetScale))
                    || ((tmpScale < 1f) && (mTargetScale < currentScale))) {
                ClipZoomImageView.this.postDelayed(this, 16);
            } else {
                // snap exactly to the target scale
                final float deltaScale = mTargetScale / currentScale;
                mScaleMatrix.postScale(deltaScale, deltaScale, x, y);
                checkBorder();
                setImageMatrix(mScaleMatrix);
                isAutoScale = false;
            }
        }
    }

    @Override
    public boolean onScale(ScaleGestureDetector detector) {
        float scale = getScale();
        float scaleFactor = detector.getScaleFactor();

        if (getDrawable() == null)
            return true;

        // keep the scale inside [initScale, SCALE_MAX]
        if ((scale < SCALE_MAX && scaleFactor > 1.0f)
                || (scale > initScale && scaleFactor < 1.0f)) {
            // clamp to the minimum and maximum scale
            if (scaleFactor * scale < initScale) {
                scaleFactor = initScale / scale;
            }
            if (scaleFactor * scale > SCALE_MAX) {
                scaleFactor = SCALE_MAX / scale;
            }
            // apply the scale around the gesture focus
            mScaleMatrix.postScale(scaleFactor, scaleFactor,
                    detector.getFocusX(), detector.getFocusY());
            checkBorder();
            setImageMatrix(mScaleMatrix);
        }
        return true;
    }

    /** Get the image bounds under the current matrix. */
    private RectF getMatrixRectF() {
        Matrix matrix = mScaleMatrix;
        RectF rect = new RectF();
        Drawable d = getDrawable();
        if (null != d) {
            rect.set(0, 0, d.getIntrinsicWidth(), d.getIntrinsicHeight());
            matrix.mapRect(rect);
        }
        return rect;
    }

    @Override
    public boolean onScaleBegin(ScaleGestureDetector detector) {
        // accept the gesture so that onScale() will be delivered
        return true;
    }

    @Override
    public void onScaleEnd(ScaleGestureDetector detector) {
    }
    @Override
    public boolean onTouch(View v, MotionEvent event) {
        if (mGestureDetector.onTouchEvent(event))
            return true;
        mScaleGestureDetector.onTouchEvent(event);

        float x = 0, y = 0;
        // number of pointers currently on the screen
        final int pointerCount = event.getPointerCount();
        // average x and y over all pointers
        for (int i = 0; i < pointerCount; i++) {
            x += event.getX(i);
            y += event.getY(i);
        }
        x = x / pointerCount;
        y = y / pointerCount;

        // reset mLastX / mLastY whenever the number of pointers changes
        if (pointerCount != lastPointerCount) {
            isCanDrag = false;
            mLastX = x;
            mLastY = y;
        }
        lastPointerCount = pointerCount;

        switch (event.getAction()) {
        case MotionEvent.ACTION_MOVE:
            float dx = x - mLastX;
            float dy = y - mLastY;
            if (!isCanDrag) {
                isCanDrag = isCanDrag(dx, dy);
            }
            if (isCanDrag) {
                if (getDrawable() != null) {
                    RectF rectF = getMatrixRectF();
                    // if the image is narrower than the clip window, disable horizontal dragging
                    if (rectF.width() <= getWidth() - mHorizontalPadding * 2) {
                        dx = 0;
                    }
                    // if the image is shorter than the clip window, disable vertical dragging
                    if (rectF.height() <= screenHeight - mVerticalPadding * 2) {
                        dy = 0;
                    }
                    mScaleMatrix.postTranslate(dx, dy);
                    checkBorder();
                    setImageMatrix(mScaleMatrix);
                }
            }
            mLastX = x;
            mLastY = y;
            break;
        case MotionEvent.ACTION_UP:
        case MotionEvent.ACTION_CANCEL:
            lastPointerCount = 0;
            break;
        }
        return true;
    }

    /** Get the current scale factor. */
    public final float getScale() {
        mScaleMatrix.getValues(matrixValues);
        return matrixValues[Matrix.MSCALE_X];
    }

    @Override
    protected void onAttachedToWindow() {
        super.onAttachedToWindow();
        getViewTreeObserver().addOnGlobalLayoutListener(this);
    }

    @SuppressWarnings("deprecation")
    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
        getViewTreeObserver().removeGlobalOnLayoutListener(this);
    }

    /** Horizontal padding between the clip window and the view edge. */
    private int mHorizontalPadding;
    /** Vertical padding between the clip window and the view edge. */
    private int mVerticalPadding;
    @Override
    public void onGlobalLayout() {
        if (once) {
            Drawable d = getDrawable();
            if (d == null)
                return;

            // vertical padding of the clip window
            mVerticalPadding = (getHeight() - (getWidth() - 2 * mHorizontalPadding)) / 2;
            if (type == 1) {
                mVerticalPadding = (screenHeight - (((screenWidth * 2) / 3) * 3) / 2) / 2;
            } else {
                mVerticalPadding = (screenHeight - 300) / 2;
            }

            int width = getWidth(); // e.g. 720
            int height = screenHeight; // e.g. 1230; this height does not include the status bar
            // intrinsic size of the image
            int dw = d.getIntrinsicWidth(); // e.g. 114
            int dh = d.getIntrinsicHeight(); // e.g. 114
            float scale = 1.0f;
            Log.i(TAG, "image width===" + dw + " image height====" + dh);

            // image narrower than the clip area but taller: scale based on the width
            if (dw < width - mHorizontalPadding * 2 && dh > height - mVerticalPadding * 2) {
                scale = (width * 1.0f - mHorizontalPadding * 2) / dw;
            }
            // image wider than the clip area but shorter: scale based on the height
            if (dh < height - mVerticalPadding * 2 && dw > width - mHorizontalPadding * 2) {
                scale = (height * 1.0f - mVerticalPadding * 2) / dh;
            }
            // both smaller: take the larger of the two ratios
            if (dw < width - mHorizontalPadding * 2 && dh < height - mVerticalPadding * 2) {
                float scaleW = (width * 1.0f - mHorizontalPadding * 2) / dw;
                float scaleH = (height * 1.0f - mVerticalPadding * 2) / dh;
                scale = Math.max(scaleW, scaleH);
            }
            // both larger: take the smaller of the two ratios
            if (dw > width - mHorizontalPadding * 2 && dh > height - mVerticalPadding * 2) {
                float scaleW = (width * 1.0f - mHorizontalPadding * 2) / dw;
                float scaleH = (height * 1.0f - mVerticalPadding * 2) / dh;
                scale = Math.min(scaleW, scaleH);
            }

            initScale = scale;
            SCALE_MID = initScale * 2;
            if (SCALE_MID < 0.1f) {
                SCALE_MID = 0.1f;
            }
            Log.i(TAG, "scalescalescale===" + scale);
            SCALE_MAX = scale * 4;

            // move the image to the screen center, then scale it around the view center
            mScaleMatrix.postTranslate((width - dw) / 2, (height - dh) / 2);
            mScaleMatrix.postScale(scale, scale, width / 2, height / 2);
            setImageMatrix(mScaleMatrix);
            once = false;
        }
    }

    /** Clip the image and return the clipped bitmap. */
    public Bitmap clip() {
        Bitmap bitmap = Bitmap.createBitmap(getWidth(), screenHeight, Bitmap.Config.RGB_565);
        Canvas canvas = new Canvas(bitmap);
        canvas.drawColor(Color.parseColor("#2a2c32"));
        draw(canvas);
        if (type == 1) {
            return Bitmap.createBitmap(bitmap, mHorizontalPadding, mVerticalPadding,
                    getWidth() - 2 * mHorizontalPadding, screenHeight - 2 * mVerticalPadding);
        }
        return toRoundBitmap(Bitmap.createBitmap(bitmap, (getWidth() - 300) / 2,
                (screenHeight - 300) / 2, 300, 300));
    }
    /**
     * Convert a bitmap into a circular bitmap.
     * 
     * @param bitmap the source Bitmap
     */
    public Bitmap toRoundBitmap(Bitmap bitmap) {
        int width = bitmap.getWidth();
        int height = bitmap.getHeight();
        float roundPx;
        float left, top, right, bottom, dst_left, dst_top, dst_right, dst_bottom;
        if (width <= height) {
            roundPx = width / 2;
            left = 0;
            top = 0;
            right = width;
            bottom = width;
            height = width;
            dst_left = 0;
            dst_top = 0;
            dst_right = width;
            dst_bottom = width;
        } else {
            roundPx = height / 2;
            float clip = (width - height) / 2;
            left = clip;
            right = width - clip;
            top = 0;
            bottom = height;
            width = height;
            dst_left = 0;
            dst_top = 0;
            dst_right = height;
            dst_bottom = height;
        }

        Bitmap output = Bitmap.createBitmap(width, height, Config.ARGB_8888);
        Canvas canvas = new Canvas(output);
        final int color = 0xff424242;
        final Paint paint = new Paint();
        final Rect src = new Rect((int) left, (int) top, (int) right, (int) bottom);
        final Rect dst = new Rect((int) dst_left, (int) dst_top, (int) dst_right, (int) dst_bottom);
        final RectF rectF = new RectF(dst);

        paint.setAntiAlias(true); // smooth the edge of the circle
        canvas.drawARGB(0, 0, 0, 0); // fill the whole canvas with transparency
        paint.setColor(color);
        // Two ways to draw the circle: drawRoundRect or drawCircle.
        // canvas.drawRoundRect(rectF, roundPx, roundPx, paint); // rounded rect: first argument is the draw area, the other two are the horizontal and vertical corner radii
        canvas.drawCircle(roundPx, roundPx, roundPx, paint);

        // blend mode applied where the two drawings intersect (see /blog/1189452)
        paint.setXfermode(new PorterDuffXfermode(Mode.SRC_IN));
        // composite the bitmap with the circle already drawn, using Mode.SRC_IN
        canvas.drawBitmap(bitmap, src, dst, paint);
        return output;
    }

    /** Border check: keep the image covering the clip window. */
    private void checkBorder() {
        RectF rect = getMatrixRectF();
        float deltaX = 0;
        float deltaY = 0;

        int width = getWidth();
        int height = screenHeight;

        Log.e(TAG, "rect.width() = " + rect.width()
                + " , width - 2 * mHorizontalPadding = " + (width - 2 * mHorizontalPadding));
        Log.e(TAG, "rect.height() = " + rect.height()
                + " , height - 2 * mVerticalPadding = " + (height - 2 * mVerticalPadding));

        // If the image is at least as wide/tall as the clip window, constrain its edges.
        // The 0.01 compensates for floating-point precision loss; the error is tiny.
        if (rect.width() + 0.01 >= width - 2 * mHorizontalPadding) {
            if (rect.left > mHorizontalPadding) {
                deltaX = -rect.left + mHorizontalPadding;
            }
            if (rect.right < width - mHorizontalPadding) {
                deltaX = width - mHorizontalPadding - rect.right;
            }
        }
        if (rect.height() + 0.01 >= height - 2 * mVerticalPadding) {
            Log.i(TAG, "rect.top==" + rect.top);
            if (rect.top > mVerticalPadding) {
                deltaY = -rect.top + mVerticalPadding;
            }
            Log.i(TAG, "rect.bottom==" + rect.bottom);
            if (rect.bottom < height - mVerticalPadding) {
                deltaY = height - mVerticalPadding - rect.bottom;
            }
        }
        Log.i(TAG, "deltaY==" + deltaY);
        mScaleMatrix.postTranslate(deltaX, deltaY);
    }

    /**
     * Whether the movement is big enough to count as a drag.
     * 
     * @param dx
     * @param dy
     */
    private boolean isCanDrag(float dx, float dy) {
        return Math.sqrt((dx * dx) + (dy * dy)) >= mTouchSlop;
    }

    public void setHorizontalPadding(int mHorizontalPadding) {
        this.mHorizontalPadding = mHorizontalPadding;
    }

    public void setmVerticalPadding(int mVerticalPadding) {
        this.mVerticalPadding = mVerticalPadding;
    }
}
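A hedged usage sketch, not part of the project: the container id, drawable and padding values below are my own assumptions, and the snippet is meant to run inside an Activity.

// Hypothetical usage inside an Activity (ids, drawable and padding are assumptions).
ClipZoomImageView clipView = new ClipZoomImageView(this, 1, 20, 20);
clipView.setImageResource(R.drawable.pic0);
ViewGroup container = (ViewGroup) findViewById(R.id.clip_container);
container.addView(clipView, new ViewGroup.LayoutParams(
        ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));

// later, e.g. from a "confirm" button:
Bitmap avatar = clipView.clip(); // rectangular crop when type == 1, round crop otherwise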