validation and KW Hammer load cases

Georg Brantegger
2022-09-27 15:31:36 +02:00
parent e1c7bc9d07
commit a951ea9b64
45 changed files with 1378443 additions and 1273749 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -14,7 +14,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -37,7 +37,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -46,7 +46,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -56,7 +56,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
@@ -71,12 +71,13 @@
"UT_M1_p = UT_df['UL_T1_p'].to_numpy()\n",
"UT_M2_p = UT_df['UL_T2_p'].to_numpy()\n",
"UT_M1_LA = UT_df['UL_T1_LA'].to_numpy()\n",
"UT_M2_LA = UT_df['UL_T2_LA'].to_numpy()"
"UT_M2_LA = UT_df['UL_T2_LA'].to_numpy()\n",
"UT_Ausl = UT_df['Ausl'].to_numpy()\n"
]
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -85,7 +86,7 @@
"(1657542740.9878564, 1657553169.3173888)"
]
},
"execution_count": 21,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
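The tuple printed above is a pair of Unix timestamps from July 2022, presumably the window bounds t1 and t2 that the masks in the next cell compare against. A minimal sketch of producing such bounds, assuming they come from wall-clock datetimes (the values below are illustrative, not the ones behind this output):

import pandas as pd

# Hypothetical bounds; the real t1/t2 are defined outside this hunk.
# pd.Timestamp(...).timestamp() returns Unix seconds as a float, matching
# the fractional-second values printed above.
t1 = pd.Timestamp('2022-07-11 14:32', tz='Europe/Vienna').timestamp()
t2 = pd.Timestamp('2022-07-11 17:26', tz='Europe/Vienna').timestamp()
(t1, t2)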
@@ -116,30 +117,21 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 21,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "Cannot construct a dtype from an array",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32mv:\\georg\\Documents\\Persönliche Dokumente\\Arbeit\\Kelag\\Coding\\Python\\DT_Slot_3\\Kelag_DT_Slot_3\\Validation Data\\consolidated pandas dataframes\\consolidate_validation_data.ipynb Cell 7\u001b[0m in \u001b[0;36m<cell line: 7>\u001b[1;34m()\u001b[0m\n\u001b[0;32m <a href='vscode-notebook-cell:/v%3A/georg/Documents/Pers%C3%B6nliche%20Dokumente/Arbeit/Kelag/Coding/Python/DT_Slot_3/Kelag_DT_Slot_3/Validation%20Data/consolidated%20pandas%20dataframes/consolidate_validation_data.ipynb#X10sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m fig \u001b[39m=\u001b[39m plt\u001b[39m.\u001b[39mfigure()\n\u001b[0;32m <a href='vscode-notebook-cell:/v%3A/georg/Documents/Pers%C3%B6nliche%20Dokumente/Arbeit/Kelag/Coding/Python/DT_Slot_3/Kelag_DT_Slot_3/Validation%20Data/consolidated%20pandas%20dataframes/consolidate_validation_data.ipynb#X10sZmlsZQ%3D%3D?line=5'>6</a>\u001b[0m plt\u001b[39m.\u001b[39mplot(UT_t_vec[mask_UT],TB_level[mask_UT])\n\u001b[1;32m----> <a href='vscode-notebook-cell:/v%3A/georg/Documents/Pers%C3%B6nliche%20Dokumente/Arbeit/Kelag/Coding/Python/DT_Slot_3/Kelag_DT_Slot_3/Validation%20Data/consolidated%20pandas%20dataframes/consolidate_validation_data.ipynb#X10sZmlsZQ%3D%3D?line=6'>7</a>\u001b[0m validation_data_UT \u001b[39m=\u001b[39m np\u001b[39m.\u001b[39;49marray([UT_t_vec[mask_UT],UT_M1_LA[mask_UT],UT_M2_LA[mask_UT],UT_M1_p[mask_UT],UT_M2_p[mask_UT]],TB_level[mask_UT])\n\u001b[0;32m <a href='vscode-notebook-cell:/v%3A/georg/Documents/Pers%C3%B6nliche%20Dokumente/Arbeit/Kelag/Coding/Python/DT_Slot_3/Kelag_DT_Slot_3/Validation%20Data/consolidated%20pandas%20dataframes/consolidate_validation_data.ipynb#X10sZmlsZQ%3D%3D?line=7'>8</a>\u001b[0m validation_data_TB \u001b[39m=\u001b[39m np\u001b[39m.\u001b[39marray([TB_t_vec[mask_TB],TB_M1_LA[mask_TB],TB_M2_LA[mask_TB],TB_M1_p[mask_TB],TB_M2_p[mask_TB]])\n",
"\u001b[1;31mTypeError\u001b[0m: Cannot construct a dtype from an array"
]
}
],
"outputs": [],
"source": [
"mask_UT = np.logical_and(t1<UT_t_vec,UT_t_vec <t2)\n",
"mask_TB = np.logical_and(t1<TB_t_vec,TB_t_vec <t2)\n",
"\n",
"\n",
"fig = plt.figure()\n",
"plt.plot(UT_t_vec[mask_UT],TB_level[mask_UT])\n",
"validation_data_UT = np.array([UT_t_vec[mask_UT];UT_M1_LA[mask_UT];UT_M2_LA[mask_UT];UT_M1_p[mask_UT];UT_M2_p[mask_UT]];TB_level[mask_UT])\n",
"validation_data_TB = np.array([TB_t_vec[mask_TB];TB_M1_LA[mask_TB];TB_M2_LA[mask_TB];TB_M1_p[mask_TB];TB_M2_p[mask_TB]])"
"\n",
"validation_data_UT = UT_df[mask_UT]\n",
"validation_data_TB = TB_df[mask_TB]\n",
"\n",
"validation_data_UT.to_csv('Validation_data_UT.csv')\n",
"validation_data_TB.to_csv('Validation_data_TB.csv')\n",
"\n"
]
}
],
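The deleted error output documents the bug this hunk fixes: numpy's constructor is np.array(object, dtype=...), so in the old call the trailing TB_level[mask_UT] landed in the dtype slot, which is exactly what "TypeError: Cannot construct a dtype from an array" complains about (the semicolons in the saved source were a second, separate syntax problem). The replacement sidesteps manual stacking entirely by slicing the dataframes with the boolean masks and exporting via to_csv. A toy repro of both patterns:

import numpy as np
import pandas as pd

t = np.array([0.0, 1.0, 2.0, 3.0])
level = np.array([10.0, 11.0, 12.0, 13.0])
mask = np.logical_and(0.5 < t, t < 2.5)

# Old pattern: the second positional argument of np.array is dtype, so a
# stray trailing array raises "Cannot construct a dtype from an array".
# np.array([t[mask], level[mask]], level[mask])   # TypeError

# Explicit stacking would have worked (shape (2, n)) ...
stacked = np.array([t[mask], level[mask]])

# ... but the committed fix is simpler: mask the dataframe, export it.
df = pd.DataFrame({'t': t, 'level': level})
df[mask].to_csv('window.csv')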

File diff suppressed because it is too large


@@ -35,7 +35,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\georg\\AppData\\Local\\Temp\\ipykernel_29732\\1290488230.py:2: ParserWarning: Length of header or names does not match length of data. This leads to a loss of data with index_col=False.\n",
"C:\\Users\\georg\\AppData\\Local\\Temp\\ipykernel_24336\\3411177260.py:2: ParserWarning: Length of header or names does not match length of data. This leads to a loss of data with index_col=False.\n",
" raw_data = pd.read_csv(\"2015_08_25 15.20 M12 SS100%.csv\",sep=\";\",header=7,index_col=False)\n"
]
}
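The ParserWarning kept in this output is pandas reporting that some rows of the ';'-separated export contain more fields than the header row declares; with index_col=False the surplus trailing fields are silently dropped. A quick check to locate such rows before trusting the import (a sketch; the latin-1 encoding is an assumption about this export):

# header=7 in the read_csv call means the header sits on file line 8.
with open("2015_08_25 15.20 M12 SS100%.csv", encoding="latin-1") as f:
    lines = f.read().splitlines()
n_header = len(lines[7].split(";"))
bad = [i for i, ln in enumerate(lines[8:], start=9)
       if ln and len(ln.split(";")) != n_header]
print(f"header declares {n_header} fields; {len(bad)} data rows deviate")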
@@ -53,7 +53,7 @@
"df['M1-LA'] = pd.to_numeric(raw_data['M1-LA'])/100\n",
"df['M2-LA'] = pd.to_numeric(raw_data['M2-LA'])/100\n",
"df['Druck'] = pd.to_numeric(raw_data['P-DRL'])\n",
"df['Pegel'] = pd.to_numeric(raw_data['Pegel-UW'])\n",
"df['Pegel'] = pd.to_numeric(raw_data['PEGEL-UW'])\n",
"\n",
"val_t_vec_raw = np.array(df['timestamp']-df['timestamp'][0])\n",
"val_LA1_vec_raw = np.array(df['M1-LA']) \n",
@@ -501,7 +501,7 @@
{
"data": {
"text/plain": [
"[<matplotlib.lines.Line2D at 0x1e74e242940>]"
"[<matplotlib.lines.Line2D at 0x18a94bcf820>]"
]
},
"execution_count": 10,


@@ -21,7 +21,8 @@
"M1_p_df = pd.read_csv('M1_Druck.txt',delimiter=';')\n",
"M2_p_df = pd.read_csv('M2_Druck.txt',delimiter=';')\n",
"M1_LA_df = pd.read_csv('M1_LA.txt',delimiter=';')\n",
"M2_LA_df = pd.read_csv('M2_LA.txt',delimiter=';')"
"M2_LA_df = pd.read_csv('M2_LA.txt',delimiter=';')\n",
"Ausl_df = pd.read_csv('AL_SeitAuslPos.txt',delimiter=';')"
]
},
{
@@ -34,7 +35,8 @@
"M1_p_df['Timestamp'] = M1_p_df['TIMESTAMP UNIX']+M1_p_df['TIMESTAMP MS']/1000.\n",
"M2_p_df['Timestamp'] = M2_p_df['TIMESTAMP UNIX']+M2_p_df['TIMESTAMP MS']/1000.\n",
"M1_LA_df['Timestamp'] = M1_LA_df['TIMESTAMP UNIX']+M1_LA_df['TIMESTAMP MS']/1000.\n",
"M2_LA_df['Timestamp'] = M2_LA_df['TIMESTAMP UNIX']+M2_LA_df['TIMESTAMP MS']/1000."
"M2_LA_df['Timestamp'] = M2_LA_df['TIMESTAMP UNIX']+M2_LA_df['TIMESTAMP MS']/1000.\n",
"Ausl_df['Timestamp'] = Ausl_df['TIMESTAMP UNIX']+Ausl_df['TIMESTAMP MS']/1000."
]
},
{
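The logger splits each sample time into whole Unix seconds (TIMESTAMP UNIX) and a millisecond remainder (TIMESTAMP MS), so adding MS/1000 yields one float-seconds axis; float64 still resolves well below a microsecond at this magnitude, so no millisecond precision is lost. For example:

import pandas as pd

df = pd.DataFrame({'TIMESTAMP UNIX': [1657542740, 1657542741],
                   'TIMESTAMP MS':   [987, 12]})
df['Timestamp'] = df['TIMESTAMP UNIX'] + df['TIMESTAMP MS'] / 1000.
# -> 1657542740.987, 1657542741.012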
@@ -47,7 +49,8 @@
"M1_p_df.set_index('Timestamp',inplace=True)\n",
"M2_p_df.set_index('Timestamp',inplace=True)\n",
"M1_LA_df.set_index('Timestamp',inplace=True)\n",
"M2_LA_df.set_index('Timestamp',inplace=True)"
"M2_LA_df.set_index('Timestamp',inplace=True)\n",
"Ausl_df.set_index('Timestamp',inplace=True)"
]
},
{
@@ -61,12 +64,14 @@
"M2_p_df.drop(columns=['VARIABLE','TIMESTAMP UNIX', 'TIMESTAMP MS'],inplace=True)\n",
"M1_LA_df.drop(columns=['VARIABLE','TIMESTAMP UNIX', 'TIMESTAMP MS'],inplace=True)\n",
"M2_LA_df.drop(columns=['VARIABLE','TIMESTAMP UNIX', 'TIMESTAMP MS'],inplace=True)\n",
"Ausl_df.drop(columns=['VARIABLE','TIMESTAMP UNIX', 'TIMESTAMP MS'],inplace=True)\n",
"\n",
"pegel_df.rename(columns={'VALUE': 'TB-Pegel'},inplace=True)\n",
"M1_p_df.rename(columns={'VALUE': 'M1-Druck'},inplace=True)\n",
"M2_p_df.rename(columns={'VALUE': 'M2-Druck'},inplace=True)\n",
"M1_LA_df.rename(columns={'VALUE': 'M1-LA'},inplace=True)\n",
"M2_LA_df.rename(columns={'VALUE': 'M2-LA'},inplace=True)\n"
"M2_LA_df.rename(columns={'VALUE': 'M2-LA'},inplace=True)\n",
"Ausl_df.rename(columns={'VALUE': 'Ausl'},inplace=True)\n"
]
},
{
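The read/timestamp/set_index/drop/rename sequence now repeats six times, and this commit had to touch four separate cells just to thread the new Ausl signal through. A loop over a name-to-file mapping would make future signals one-line additions (a sketch; the file name for pegel_df is a guess, since its read is not part of this diff):

import pandas as pd

sources = {'TB-Pegel': 'Pegel.txt',   # assumed file name
           'M1-Druck': 'M1_Druck.txt', 'M2-Druck': 'M2_Druck.txt',
           'M1-LA': 'M1_LA.txt', 'M2-LA': 'M2_LA.txt',
           'Ausl': 'AL_SeitAuslPos.txt'}

frames = {}
for name, path in sources.items():
    df = pd.read_csv(path, delimiter=';')
    df['Timestamp'] = df['TIMESTAMP UNIX'] + df['TIMESTAMP MS'] / 1000.
    frames[name] = (df.set_index('Timestamp')
                      .drop(columns=['VARIABLE', 'TIMESTAMP UNIX', 'TIMESTAMP MS'])
                      .rename(columns={'VALUE': name}))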
@@ -75,7 +80,7 @@
"metadata": {},
"outputs": [],
"source": [
"UT_df = pegel_df.join([M1_LA_df,M1_p_df,M2_LA_df,M2_p_df],how='outer').sort_index()"
"UT_df = pegel_df.join([M1_LA_df,M1_p_df,M2_LA_df,M2_p_df,Ausl_df],how='outer').sort_index()"
]
},
{
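With how='outer', the join takes the union of all six timestamp indices, so every column holds NaN at instants where only another signal was sampled, and sort_index() restores chronological order afterwards. Whether to leave, forward-fill, or interpolate those gaps depends on the signal; for step-like SCADA values a forward fill is common (continuing the cell above, the fill step is a suggestion, not part of the commit):

UT_df = pegel_df.join([M1_LA_df, M1_p_df, M2_LA_df, M2_p_df, Ausl_df],
                      how='outer').sort_index()
UT_filled = UT_df.ffill()                 # step-like signals
# or: UT_df.interpolate(method='index')   # smooth quantities, time-weighted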