Skip to content

Commit

Permalink
Patched SQL injections
Browse files Browse the repository at this point in the history
  • Loading branch information
u8sand committed Feb 24, 2021
1 parent 704fa12 commit 3e411b9
Show file tree
Hide file tree
Showing 4 changed files with 10 additions and 8 deletions.
4 changes: 3 additions & 1 deletion database/data/upload/pipeline/scripts/Upload.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,9 @@ def upload_data(data, keys, table, reset_counter=True):
#############################################

def exists(accession, version):
return len(pd.read_sql_query('SELECT * FROM dataset_{version} WHERE dataset_accession = "{accession}"'.format(**locals()), engine).index)
table = 'dataset_{version}'.format(version=version)
assert table in tables, 'Invalid table'
return len(pd.read_sql_query('SELECT * FROM {table} WHERE dataset_accession = %s'.format(table=table), engine, params=(accession,)).index)

#############################################
########## 2. Upload dataset
Expand Down
2 changes: 1 addition & 1 deletion server/app/static/py/NotebookManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def upload_notebook(notebook, notebook_configuration, time, engine, user_id=None
tool_dict = pd.read_sql_table('tool', engine).set_index('tool_string')['id'].to_dict()

# Get notebook ID
notebook_id = pd.read_sql_query('SELECT id FROM notebook WHERE notebook_uid = "{}"'.format(notebook_uid), engine)['id'][0]
notebook_id = pd.read_sql_query('SELECT id FROM notebook WHERE notebook_uid = %s', engine, params=(notebook_uid,))['id'][0]

# Notebook-tool dataframe
notebook_tool_dataframe = pd.DataFrame({'tool_fk': [tool_dict[x['tool_string']] for x in notebook_configuration['tools']], 'notebook_fk': notebook_id})
Expand Down
6 changes: 3 additions & 3 deletions website/app/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -631,7 +631,7 @@ def generate_notebook():
selected_tools = [tools[x['tool_string']] for x in c['tools']]

# Estimated wait time
wait_times = pd.read_sql_query('SELECT time, count(tool_fk) AS tools FROM notebook n LEFT JOIN notebook_tool nt ON n.id=nt.notebook_fk GROUP BY n.id HAVING time > 0 AND tools ='+str(len(p)), engine)['time']
wait_times = pd.read_sql_query('SELECT time, count(tool_fk) AS tools FROM notebook n LEFT JOIN notebook_tool nt ON n.id=nt.notebook_fk GROUP BY n.id HAVING time > 0 AND tools = %s', engine, params=(str(len(p)),))['time']
expected_time = int(np.ceil(np.percentile(wait_times, 90)/60))

# Return result
Expand Down Expand Up @@ -1408,9 +1408,9 @@ def example():
dataset_v6 d
LEFT JOIN sample_v6 s ON d.id = s.dataset_fk
LEFT JOIN platform_v6 p ON p.id = s.platform_fk
WHERE dataset_accession = "{}"
WHERE dataset_accession = %s
GROUP BY d.id, p.id
'''.format(dataset_accession), engine).drop_duplicates().T.to_dict()[0]
''', engine, params=(dataset_accession,)).drop_duplicates().T.to_dict()[0]
# dataset['date'] = dataset['date'].strftime('%b %d, %Y')
return render_template('analyze/example.html', dataset=dataset)

Expand Down
6 changes: 3 additions & 3 deletions website/app/static/py/TableManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def getUID(engine, idtype='table'):
uid = 'AJ'+uid

# Check if exists
duplicate = len(pd.read_sql_query('SELECT * FROM user_dataset WHERE dataset_uid = "{}"'.format(uid), engine).index)
duplicate = len(pd.read_sql_query('SELECT * FROM user_dataset WHERE dataset_uid = %s', engine, params=(uid,)).index)

# Return
return uid
Expand Down Expand Up @@ -157,8 +157,8 @@ def uploadToDatabase(data, dataset_uid, engine, user_id, dataset_title, alignmen
sample_dataframe.to_sql('user_sample', engine, if_exists='append', index=False)

# Get sample FK
sample_names = '", "'.join(sample_dataframe['sample_name'])
sample_fk_dataframe = pd.read_sql_query('SELECT sample_name, id AS user_sample_fk FROM user_sample WHERE user_dataset_fk = {dataset_id} AND sample_name IN ("{sample_names}")'.format(**locals()), engine)
sample_names = sample_dataframe['sample_name']
sample_fk_dataframe = pd.read_sql_query('SELECT sample_name, id AS user_sample_fk FROM user_sample WHERE user_dataset_fk = %s AND sample_name IN ({})'.format(','.join('%s' for _ in sample_names)), engine, params=(dataset_id, *sample_names))

# Upload sample metadata
sample_metadata_dataframe = pd.melt(metadata_dataframe.reset_index(), id_vars='sample_name').merge(sample_fk_dataframe, on='sample_name').drop('sample_name', axis=1)
Expand Down

0 comments on commit 3e411b9

Please sign in to comment.