Basic setup before analysis
Frequently needed code for the basic setup before analysis (loading, exporting, random forest, catboost, cross validation, ...)
data = pd.read_csv('G:/내 드라이브/bb/cc/data.csv')
test.to_csv('test.csv', index = False)
- The file is exported to the directory where the source code runs (the current working directory).
test.to_csv('G:/내 드라이브/Github/TIL-Blog/test.csv', index = False)
- The file is exported to the specified path.
import pandas as pd # pandas
import numpy as np # numpy
import matplotlib.pyplot as plt # matplotlib
import matplotlib
import seaborn as sns # seaborn
train = pd.read_csv('https://bit.ly/fc-ml-titanic')
feature = [
'Pclass', 'Sex', 'Age', 'Fare'
]
label = [
'Survived'
]
from sklearn.model_selection import train_test_split
- test_size: fraction of the data assigned to the validation set (20% -> 0.2)
- shuffle: whether to shuffle before splitting (default: True)
- random_state: random seed
x_train, x_valid, y_train, y_valid = train_test_split(train[feature], train[label], test_size=0.2, shuffle=True, random_state=30)
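A quick sanity check on the split sizes (roughly 80% train / 20% validation):
print(x_train.shape, x_valid.shape)
print(y_train.shape, y_valid.shape)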
from sklearn.impute import SimpleImputer
Filling a single column
train['Age'] = train['Age'].fillna(train['Age'].mean())
Filling several columns at once
imputer = SimpleImputer(strategy='median')  # handles several columns in one pass; strategy can be 'median', 'mean', ...
result = imputer.fit_transform(train[['Age', 'Pclass']])
train[['Age', 'Pclass']] = result
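A quick check that no missing values remain after imputation:
train[['Age', 'Pclass']].isnull().sum()  # both counts should be 0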
train = pd.read_csv('https://bit.ly/fc-ml-titanic')
Filling a single column
train['Embarked'] = train['Embarked'].fillna('S')
Filling several columns at once
imputer = SimpleImputer(strategy='most_frequent')
result = imputer.fit_transform(train[['Embarked', 'Cabin']])
train[['Embarked', 'Cabin']] = result
from sklearn.preprocessing import LabelEncoder
train['Embarked_num'] = LabelEncoder().fit_transform(train['Embarked'])
train['Embarked_num'].value_counts()
pd.get_dummies(train['Embarked_num'], prefix = 'Embarked')
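get_dummies only returns the new one-hot columns; to keep them, concatenate them back onto the original frame (a minimal sketch):
one_hot = pd.get_dummies(train['Embarked_num'], prefix='Embarked')
train = pd.concat([train, one_hot], axis=1)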
movie = {'naver': [2, 4, 6, 8, 10],
'netflix': [1, 2, 3, 4, 5]}
movie = pd.DataFrame(data=movie)
from sklearn.preprocessing import MinMaxScaler
min_max_movie = MinMaxScaler().fit_transform(movie)
pd.DataFrame(min_max_movie, columns=['naver', 'netflix'])
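The scaler simply applies (x - min) / (max - min) per column, which can be verified by hand:
(movie - movie.min()) / (movie.max() - movie.min())  # matches min_max_movie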
from sklearn.preprocessing import StandardScaler
x = np.arange(10)
# add an outlier
x[9] = 1000
x = x.reshape(-1, 1)
scaled = StandardScaler().fit_transform(x)
round(scaled.mean(), 2), scaled.std()
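StandardScaler computes z = (x - mean) / std, so the manual version gives the same values:
(x - x.mean()) / x.std()  # identical to `scaled`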
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data['data'], columns=data['feature_names'])
df['MEDV'] = data['target']
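Note that load_boston was removed in scikit-learn 1.2; if it is unavailable, a similar regression dataset can be swapped in (a sketch using fetch_california_housing, with the target renamed to MEDV so the code below runs unchanged):
from sklearn.datasets import fetch_california_housing
data = fetch_california_housing()
df = pd.DataFrame(data['data'], columns=data['feature_names'])
df['MEDV'] = data['target']  # keep the same target column name used below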
from lightgbm import LGBMRegressor, LGBMClassifier
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df.drop('MEDV', axis=1), df['MEDV'], random_state=42)
from sklearn.model_selection import KFold
n_splits = 5
kfold = KFold(n_splits=n_splits, shuffle = True, random_state=42)
X = np.array(df.drop('MEDV', axis=1))
Y = np.array(df['MEDV'])
lgbm_fold = LGBMRegressor(random_state=42)
i = 1
total_error = 0
for train_index, test_index in kfold.split(X):
    x_train_fold, x_test_fold = X[train_index], X[test_index]
    y_train_fold, y_test_fold = Y[train_index], Y[test_index]
    lgbm_pred_fold = lgbm_fold.fit(x_train_fold, y_train_fold).predict(x_test_fold)
    error = mean_squared_error(lgbm_pred_fold, y_test_fold)
    print('Fold = {}, prediction score = {:.2f}'.format(i, error))
    total_error += error
    i += 1
print('---'*10)
print('Average Error: %s' % (total_error / n_splits))
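The same K-fold evaluation can be done in one call with cross_val_score (a minimal sketch; the scores come back as negative MSE, hence the sign flip):
from sklearn.model_selection import cross_val_score
scores = cross_val_score(LGBMRegressor(random_state=42), X, Y, cv=kfold, scoring='neg_mean_squared_error')
print('Average Error: %s' % -scores.mean())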
params = {
'n_estimators': [200, 500, 1000, 2000],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [6, 7, 8],
'colsample_bytree': [0.8, 0.9, 1.0],
'subsample': [0.8, 0.9, 1.0],
}
Key hyperparameters (LGBM); a usage sketch follows this list.
- random_state: fixed random seed. Keep it fixed while tuning!
- n_jobs: number of CPU cores to use
- learning_rate: learning rate. Too high hurts performance, too low makes training slow; find a suitable value and tune it together with n_estimators. default=0.1
- n_estimators: number of boosting stages (similar in spirit to the number of trees in a random forest). default=100
- max_depth: maximum tree depth, used to guard against overfitting. default=-1 (no limit)
- colsample_bytree: fraction of columns (features) sampled per tree (similar to max_features); also guards against overfitting. default=1.0
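A minimal sketch putting these options together (the values are illustrative, not tuned):
model = LGBMRegressor(random_state=42, n_jobs=-1, learning_rate=0.05,
                      n_estimators=500, max_depth=7, colsample_bytree=0.9)
model.fit(x_train, y_train)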
from sklearn.model_selection import RandomizedSearchCV
n_iter controls how many parameter combinations are tried in total.
(More attempts raise the chance of finding better parameters, but the search takes correspondingly longer.)
clf = RandomizedSearchCV(LGBMRegressor(), params, random_state=42, cv=3, n_iter=25, scoring='neg_mean_squared_error')
clf.fit(x_train, y_train)
clf.best_score_
clf.best_params_
lgbm_best = LGBMRegressor(n_estimators=2000, subsample=0.8, max_depth=7, learning_rate=0.01, colsample_bytree=0.8)
lgbm_best_pred = lgbm_best.fit(x_train, y_train).predict(x_test)
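To see how much tuning helped, evaluate the tuned model on the held-out set (note that best_score_ above is a negative MSE because of the neg_mean_squared_error scoring):
mean_squared_error(y_test, lgbm_best_pred)  # lower is better; compare against an untuned model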
- Performs an exhaustive search over every combination of the given parameter values.
- So if there are many parameters to optimize, it can take a very long time.
params = {
'n_estimators': [500, 1000],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [7, 8],
'colsample_bytree': [0.8, 0.9],
'subsample': [0.8, 0.9],
}
from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(LGBMRegressor(), params, cv=3, n_jobs=-1, scoring='neg_mean_squared_error')
grid_search.fit(x_train, y_train)
grid_search.best_score_
grid_search.best_params_
lgbm_best = LGBMRegressor(n_estimators=500, subsample=0.8, max_depth=7, learning_rate=0.05, colsample_bytree=0.8)
lgbm_best_pred = lgbm_best.fit(x_train, y_train).predict(x_test)
from catboost import CatBoostRegressor # CatBoost regression
from catboost import CatBoostClassifier # CatBoost classification
model = CatBoostRegressor()
model.fit(X_train, y_train, silent=True)
pred = model.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, pred))
rmse
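One of CatBoost's main features is native handling of categorical columns via the cat_features argument of fit; a minimal sketch (the column index [0] is hypothetical and must point at an integer or string column):
model = CatBoostRegressor()
model.fit(X_train, y_train, silent=True, cat_features=[0])  # [0] is a placeholder index of a categorical column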
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, oob_score=True, random_state=123, max_depth=6)
rf.fit(X_train, y_train)
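Because oob_score=True was passed, the fitted forest exposes an out-of-bag score along with per-feature importances:
rf.oob_score_            # out-of-bag accuracy estimate
rf.feature_importances_  # relative importance of each input feature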
from xgboost import XGBRegressor
from xgboost import XGBClassifier
from lightgbm import LGBMRegressor
from lightgbm import LGBMClassifier
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y_test, pred))
rmse
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, predicted)