hadoop - Task process exit with nonzero status of 134

An investigation into the "Task process exit with nonzero status of 134" error that occurs when Hadoop calls C++ code through JNI.

Generally speaking, if this error appears no matter which program you run, the problem is most likely in the libraries, as various online posts suggest. If some programs fail while others run fine, the problem is most likely in your own code. (In my experience this almost always turns out to be a pointer bug, i.e. operating on a wild or dangling pointer, as in the second case below.)
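
As background, exit status 134 is 128 + 6: the child process was killed by signal 6 (SIGABRT). A common trigger is glibc's allocator detecting heap corruption (a double free, an out-of-bounds write) and calling abort(); Hadoop's task runner then just reports the dead child's status. Here is a minimal standalone C++ sketch, unrelated to the task code, that reproduces an exit status of 134:

int main()
{
        // Allocate once, free twice. glibc's allocator typically detects the
        // double free, prints a diagnostic, and calls abort(), killing the
        // process with SIGABRT (signal 6).
        int *p = new int[16];
        delete[] p;
        delete[] p;              // heap corruption -> abort()
        return 0;                // never reached
}

// $ g++ crash134.cpp -o crash134 && ./crash134; echo $?
// (glibc prints a double-free diagnostic, then the shell reports) 134

The same mechanism applies inside a Hadoop child JVM: a native abort() kills the whole task process, and the TaskTracker only sees the 134.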

1. Commenting out the delete[] calls in the JNI layer made the problem go away.

JNIEXPORT jbyteArray JNICALL Java_hequn_hadoop_image_hq_1Image_1Process_zqf_1koutu
  (JNIEnv *env, jclass obj, jbyteArray src, jbyteArray ref, jint width, jint height)
{
        unsigned char *uc_src = new unsigned char[width * height * 3];
        unsigned char *uc_ref = new unsigned char[width * height * 3];
        unsigned char *uc_des = NULL;

        jsize len = env->GetArrayLength(src);
        jbyte *srcbody = env->GetByteArrayElements(src, 0);
        jbyte *refbody = env->GetByteArrayElements(ref, 0);

        // Copy the signed jbyte data into the unsigned buffers. The assignment
        // already maps each byte to 0..255, so the original
        // "if (x < 0) x += 256;" fix-ups could never fire on an unsigned char
        // and have been dropped.
        int idx = 0;
        for (int i = 0; i < width; ++i) {
                for (int j = 0; j < height; ++j) {
                        uc_src[idx]     = srcbody[idx];
                        uc_src[idx + 1] = srcbody[idx + 1];
                        uc_src[idx + 2] = srcbody[idx + 2];

                        uc_ref[idx]     = refbody[idx];
                        uc_ref[idx + 1] = refbody[idx + 1];
                        uc_ref[idx + 2] = refbody[idx + 2];

                        idx += 3;
                }
        }

        chromakey ch;
        ch.koutu(uc_src, uc_ref);
        uc_des = ch.koutu_result;  // points at a member of ch; valid only while ch lives

        jbyteArray des = env->NewByteArray(len);
        // (The original also pinned des via GetByteArrayElements into an
        // unused desp pointer that was never released; that call is removed.)

        // g_byte_buf is a global staging buffer defined elsewhere. The result
        // is one byte per pixel, so width * height bytes are copied.
        idx = 0;
        for (int i = 0; i < width; ++i) {
                for (int j = 0; j < height; ++j) {
                        g_byte_buf[idx] = uc_des[idx];
                        idx++;
                }
        }

        // Hardcoded size: assumes a 1024 x 768 image, i.e. width * height bytes.
        env->SetByteArrayRegion(des, 0, 1024 * 768, g_byte_buf);

        // Release the arrays pinned by GetByteArrayElements (JNI_ABORT: they
        // were only read, so there is nothing to copy back).
        env->ReleaseByteArrayElements(src, srcbody, JNI_ABORT);
        env->ReleaseByteArrayElements(ref, refbody, JNI_ABORT);

//      if (uc_src != NULL) delete[] uc_src;
//      if (uc_ref != NULL) delete[] uc_ref;
//      delete[] uc_src;
//      delete[] uc_ref;
        return des;
}
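
Two notes on this "fix". First, removing the delete[] calls trades the crash for a memory leak on every call; when a delete[] aborts with status 134, the heap was usually corrupted earlier (for example by an out-of-bounds write), and the delete[] is merely where glibc notices. Second, every Get<Type>ArrayElements call should eventually be paired with a Release<Type>ArrayElements call, or the pinned (or copied) array is never handed back to the JVM. A minimal sketch of that pairing, using a hypothetical native method rather than the code above:

#include <jni.h>

// Hypothetical native method illustrating the pin/release pairing.
JNIEXPORT void JNICALL Java_Example_process(JNIEnv *env, jclass, jbyteArray src)
{
        jsize len = env->GetArrayLength(src);
        jbyte *body = env->GetByteArrayElements(src, 0);  // pins or copies the array

        long sum = 0;
        for (jsize i = 0; i < len; ++i) {
                sum += (unsigned char)body[i];            // read-only use
        }

        // JNI_ABORT: the data was only read, so discard without copying back.
        env->ReleaseByteArrayElements(src, body, JNI_ABORT);
        (void)sum;
}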

2. Changing int *num = new int to int *num = &PointsNum, i.e. replacing the dynamically allocated int with a pointer to an int defined in the function, made the problem go away. The cause: inside IBVH(uc_src, cabli, f_des, num), the num pointer was redirected to the address of a member variable; when IBVH() returned, that member was destroyed, so num was left pointing at an unknown (dangling) address, which caused the error. (A standalone sketch of this failure mode follows the function below.)

JNIEXPORT jfloatArray JNICALL Java_hequn_hadoop_image_hq_1Image_1Process_my_1IPVH
  (JNIEnv *env, jclass obj, jbyteArray src, jdoubleArray cab)
{
        unsigned char *uc_src = new unsigned char[1024 * 768 * 20];
        float *f_des = new float[400000];
        double cabli[240];
        int PointsNum = 0;
        // The fix: num points at a local int whose lifetime covers the whole
        // call, instead of "int *num = new int".
        int *num = &PointsNum;

        jbyte *srcbody = env->GetByteArrayElements(src, 0);
        jdouble *cabbody = env->GetDoubleArrayElements(cab, 0);

        // Assumes the Java array holds at least LENGTH bytes. As in the first
        // function, assigning jbyte to unsigned char already yields 0..255,
        // so no sign fix-up is needed.
        int LENGTH = 1024 * 768 * 20;
        for (int i = 0; i < LENGTH; ++i) {
                uc_src[i] = srcbody[i];
        }

        for (int i = 0; i < 240; ++i) {
                cabli[i] = cabbody[i];
        }

        IBVH(uc_src, cabli, f_des, num);
        /*
        // Debug stub: fake a result without calling IBVH().
        PointsNum = 10;
        for (int i = 0; i < 10; ++i) f_des[i] = 1.1;
        */

        // g_float_buf is a global staging buffer defined elsewhere.
        for (int i = 0; i < PointsNum; ++i) {
                g_float_buf[i] = f_des[i];
        }

        jfloatArray des = env->NewFloatArray(PointsNum);
        env->SetFloatArrayRegion(des, 0, PointsNum, g_float_buf);

        // Release the pinned input arrays (read-only, so discard changes).
        // (The original also pinned des into an unused desbody; removed.)
        env->ReleaseByteArrayElements(src, srcbody, JNI_ABORT);
        env->ReleaseDoubleArrayElements(cab, cabbody, JNI_ABORT);

//      delete[] uc_src;
//      delete[] f_des;

        return des;
}
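
To make the case-2 failure mode concrete, here is a standalone sketch with a hypothetical Solver class (the post does not show IBVH's internals, so this is an assumed shape of the bug): a callee hands back the address of a member of an object that is destroyed when the call returns, leaving the caller with a dangling pointer.

#include <cstdio>

struct Solver {
        int points;                      // member; destroyed with the Solver
        int *result() { return &points; }
};

int *get_count()
{
        Solver s;
        s.points = 42;
        return s.result();               // escapes the address of s.points
}                                        // s is destroyed here

int main()
{
        int *num = get_count();          // num dangles from this point on
        printf("%d\n", *num);            // undefined behaviour: reads dead storage
        return 0;
}

The fix in the post sidesteps this: num points at PointsNum, a local in the JNI function whose lifetime spans the entire call, and the count is read back through that local rather than through whatever address the callee left behind.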

3. There are also online reports that this is a problem with the native libraries, so I recompiled them with ant. The build itself was not smooth either: ant compile-native needs supporting packages, which I could only get via yum install. After the build finished and I swapped in the new library files, the "Task process exit with nonzero status of 134" error still appeared.

Worse, after the rebuild, pointing a browser at 192.168.178.92:50030 (the JobTracker web UI) returned 404 Not Found. I tried many different approaches, none of which helped. Fortunately I had backed up Hadoop beforehand, so I restored it, and luckily the restore brought everything back...

posted on 2013-12-20 22:02 by hequn8128