Fall detection on the MobiAct v2.0 dataset: accelerometer data preprocessing and a CNN classifier.
import pandas as pd
In [2]:
# Notebook cell: load one annotated forward-fall (FOL) recording from MobiAct.
df = pd.read_csv('/home/helong/share/ML/MobiAct_Dataset_v2.0/Annotated Data/FOL/FOL_1_1_annotated.csv')
In [3]:
# Inspect the first rows of the raw recording.
df.head()
Out[3]:
In [4]:
# Keep only the three accelerometer channels.
x = df[['acc_x','acc_y','acc_z']]
In [5]:
x.head()
Out[5]:
In [11]:
# Affine transform that shifts raw accelerometer values by `data_move` and
# stretches them by `data_scale`, mapping them into an RGB-like value range.
data_move = 20
data_scale = 6

def transform_rgb(x, move=data_move, scale=data_scale):
    """Return ``(x + move) * scale``.

    Works element-wise on scalars, numpy arrays, and pandas objects, so it
    can be passed directly to ``DataFrame.apply``.

    :param x: numeric value or array-like of accelerometer readings
    :param move: offset added before scaling (default: ``data_move``)
    :param scale: multiplier applied after the offset (default: ``data_scale``)
    :return: transformed value(s), same shape as ``x``
    """
    return (x + move) * scale
# Apply the shift-and-scale transform to the three acceleration channels.
acc_cols = ['acc_x', 'acc_y', 'acc_z']
x = df[acc_cols].apply(transform_rgb)
In [12]:
# Preview the transformed channels.
x.head()
Out[12]:
In [13]:
# Flatten the (time-steps x 3 channels) frame into a single wide row:
# stack() interleaves the three channel values per time step into one
# Series, and to_frame().T turns that Series into a 1 x N DataFrame.
x = x.stack().to_frame().T
In [14]:
x.head()
Out[14]:
In [8]:
x.head()
Out[8]:
In [3]:
import os
import tensorflow as tf
import numpy as np
import pandas as pd
Convert the data: flatten each recording's transformed accelerometer channels into one row per sample and merge them into a single CSV.
In [36]:
# Convert every annotated STU recording into one flattened sample row and
# write all rows to a single training CSV.
PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/Annotated Data/STU'

df_list = []
for file in os.listdir(PATH):
    df = pd.read_csv(os.path.join(PATH, file))
    if not df.empty:  # skip empty recordings
        df_list.append(df)

rows = []
for df in df_list:
    # Shift/scale the three acceleration channels, then flatten the
    # (time-steps x 3) frame into a single 1 x N row.
    x = df[['acc_x', 'acc_y', 'acc_z']].apply(transform_rgb)
    rows.append(x.stack().to_frame().T)

# Build the result with one pd.concat instead of DataFrame.append in a loop:
# append was deprecated in pandas 1.4 and removed in 2.0, and repeated
# appending is quadratic in the number of files.
sum_df = pd.concat(rows) if rows else pd.DataFrame()

# Prepend the label column 'A' (value 0 for this activity class).
sum_df.insert(loc=0, column='A', value=0)
print(sum_df.head())
# NOTE(review): the directory is spelled 'tran_data_transform' here but
# 'train_data_transform' in get_all_data/get_test_data — confirm which
# path is intended before relying on this output location.
sum_df.to_csv('/home/helong/share/ML/MobiAct_Dataset_v2.0/tran_data_transform/STU.csv', index=False)
print("done")
In [2]:
def get_all_data():
    """Load the flattened training samples from every CSV under ``PATH``.

    Each CSV is expected to hold one flattened sample per row (label column
    'A' followed by feature columns). Returns a single DataFrame with the
    rows in random order.
    """
    PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform'
    frames = []
    for f in os.listdir(PATH):
        file_path = os.path.join(PATH, f)
        print(file_path)
        if 'csv' in f:  # NOTE(review): matches any name containing 'csv', not just the extension
            # NOTE(review): nrows=5 reads only 5 rows per file — looks like a
            # fast-test leftover (get_test_data reads everything); confirm.
            data = pd.read_csv(file_path, index_col=False, nrows=5, low_memory=False)
            # Drop the first row (presumably a stray header/duplicate row —
            # TODO confirm) and keep the first 1201 columns (label + 1200 features).
            data = data.iloc[1:, 0:1201]
            frames.append(data)
    # One pd.concat instead of DataFrame.append in a loop: append was
    # deprecated in pandas 1.4 and removed in 2.0, and looping is quadratic.
    all_data = pd.concat(frames) if frames else pd.DataFrame()
    # Bug fix: np.random.shuffle(all_data.values) shuffled a *copy* of the
    # values for mixed-dtype frames, leaving the rows unshuffled.
    # sample(frac=1) returns all rows in random order.
    return all_data.sample(frac=1)
In [4]:
def get_test_data():
    """Load every flattened sample (all rows) from the CSVs under ``PATH``.

    Same as ``get_all_data`` but without the per-file row limit. Returns a
    single DataFrame with the rows in random order.

    NOTE(review): this reads the same 'train_data_transform' directory as
    get_all_data — confirm test data is really meant to come from there.
    """
    PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform'
    frames = []
    for f in os.listdir(PATH):
        file_path = os.path.join(PATH, f)
        print(file_path)
        if 'csv' in f:  # NOTE(review): matches any name containing 'csv', not just the extension
            data = pd.read_csv(file_path, index_col=False, low_memory=False)
            # Drop the first row (presumably a stray header/duplicate row —
            # TODO confirm) and keep the first 1201 columns (label + 1200 features).
            data = data.iloc[1:, 0:1201]
            frames.append(data)
    # One pd.concat instead of DataFrame.append in a loop: append was
    # deprecated in pandas 1.4 and removed in 2.0, and looping is quadratic.
    all_data = pd.concat(frames) if frames else pd.DataFrame()
    # Bug fix: np.random.shuffle(all_data.values) shuffled a *copy* of the
    # values for mixed-dtype frames, leaving the rows unshuffled.
    # sample(frac=1) returns all rows in random order.
    return all_data.sample(frac=1)
In [46]:
# Load the shuffled training set and report how many samples it holds.
data = get_all_data()
print(len(data))
In [5]:
# Training hyper-parameters.
CLASS_NUM = 1
LEARNING_RATE = 0.001
TRAIN_STEP = 10000
BATCH_SIZE = 50
# Book-keeping globals for hand-rolled mini-batch/epoch iteration.
_index_in_epoch = 0
_epochs_completed = 0
_num_examples = 0
# NOTE(review): 'SEVE' is likely a typo for 'SAVE'; renaming would break any
# other references to this constant, so it is kept as-is.
MODEL_SEVE_PATH = '../model/model.ckpt'
In [6]:
def wights_variable(shape):
    """Create a trainable weight tensor initialised from a truncated
    normal distribution with stddev 0.1.

    :param shape: shape of the weight tensor
    :return: ``tf.Variable`` holding the initial weights
    """
    initial = tf.truncated_normal(shape=shape, stddev=0.1)
    return tf.Variable(initial, dtype=tf.float32)
def biases_variable(shape):
    """Create a trainable bias tensor initialised to the constant 0.1.

    :param shape: shape of the bias tensor
    :return: ``tf.Variable`` holding the initial biases
    """
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, dtype=tf.float32)
def conv2d(x, kernel):
    """2-D convolution with stride 1 and SAME padding.

    :param x: input tensor (NHWC layout — assumed from reshape in fall_net)
    :param kernel: convolution kernel tensor
    :return: convolved output tensor
    """
    return tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')
def max_pooling_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves spatial dims).

    :param x: input tensor
    :return: pooled output tensor
    """
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def lrn(x):
    """Local response normalisation (depth_radius=4, bias=1.0, alpha=0.001,
    beta=0.75), which can slightly improve accuracy.

    :param x: input tensor
    :return: normalised tensor
    """
    return tf.nn.lrn(x, 4, 1.0, 0.001, 0.75)
def fall_net(x):
'''
跌到检测网络
:param x: 输入tensor,shape=[None,]
:return:
'''
with tf.name_scope('reshape'):
x = tf.reshape(x,[-1,20,20,3])
#x = x / 255.0 * 2 - 1
with tf.name_scope('conv1'):
# value shape:[-1,18,18,32]
conv1_kernel = wights_variable([5,5,3,32])
conv1_bias