How to use the streamlit.write function in streamlit

To help you get started, we’ve selected a few streamlit.write examples based on popular ways it is used in public projects.

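Before diving into the examples, here is a minimal, self-contained sketch of what st.write accepts (the values are illustrative):

import pandas as pd
import streamlit as st

st.write('Hello, *world*!')              # strings render as Markdown
st.write('Model accuracy:', 0.87)        # multiple arguments in one call
st.write(pd.DataFrame({'a': [1, 2]}))    # DataFrames render as tables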

github leenamurgai / debias-ml / source / analysis_oversampling.py
y_pred = train_predict_new(clf_nn, X_train_new, y_train_new,
                                   X_test_new, y_test_new, results_df, 0)
plot_distributions(y_pred, Z_test_new, target_feature, sensitive_features,
                   bias_cols, categories, 0, results_df, 'fair-data')

################################################################################

st.write('')
st.subheader('3.4 ...after oversampling by different amounts')
st.write('')

results_df = pd.DataFrame()

#for factor in np.linspace(0.0, 5.0, num=11):
for factor in range(1, 11):
    st.write('**Oversample factor:**', factor)
    # Oversampling to address bias in the training dataset
    X_new, y_new, Z_new = oversampler.get_oversampled_data(factor)
    # Shuffle the data after oversampling
    X_train_new, y_train_new, Z_train_new = shuffle(X_new, y_new, Z_new,
                                                    random_state=0)
    # initialise NeuralNet Classifier
    clf_nn = nn_classifier(n_features=X_train_new.shape[1])
    # make predictions on the test set
    y_pred = train_predict_new(clf_nn, X_train_new, y_train_new,
                                       X_test, y_test, results_df, factor)
    plot_distributions(y_pred, Z_test, target_feature, sensitive_features,
                       bias_cols, categories, factor, results_df,
                       'fair-algo-'+str(factor))

st.table(results_df)
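Note how st.write('**Oversample factor:**', factor) mixes a Markdown label with a raw value in a single call, labelling each oversampling run. A minimal sketch of that pattern:

import streamlit as st

for factor in range(1, 4):
    # bold Markdown label followed by the loop value
    st.write('**Oversample factor:**', factor)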
github hundredblocks / semantic-search / demo.py
    st.write("""
    We can even search for a combination of multiple words by averaging word vectors together
    """)
    with st.echo():
        results = vector_search.search_index_by_value(np.mean([word_vectors["cat"], word_vectors["sofa"]], axis=0),
                                                      image_index, file_mapping)

    st.write('\n'.join('- `%s`' % elt for elt in results))
    show_top_n(9, results, search_by_img=False)
    st.write("""
    This is a fantastic result, as most of those images contain some version of a furry animal and a sofa
    (I especially enjoy the leftmost image on the second row, which seems like a bag of furriness next to a couch)! 
    Our model, which was only trained on single words, can handle combinations of two words. 
    We have not built Google Image Search yet, but this is definitely impressive for a relatively simple architecture.
    """)
    st.write("Hope you enjoyed it! [Let me know](https://twitter.com/EmmanuelAmeisen) "
             "if you have any questions, feedback, or comments.")
    st.header("Fin")
github MarcSkovMadsen / awesome-streamlit / gallery / iris_eda_app / iris_eda_app.py
if st.checkbox("Preview DataFrame"):
        if st.button("Head"):
            st.write(data.head())
        if st.button("Tail"):
            st.write(data.tail())
        else:
            st.write(data.head(2))

    # Show Entire Dataframe
    if st.checkbox("Show All DataFrame"):
        st.dataframe(data)

    # Show All Column Names
    if st.checkbox("Show All Column Name"):
        st.text("Columns:")
        st.write(data.columns)

    # Show Dimensions and Shape of Dataset
    data_dim = st.radio("What Dimension Do You Want to Show", ("Rows", "Columns"))
    if data_dim == "Rows":
        st.text("Showing Length of Rows")
        st.write(len(data))
    if data_dim == "Columns":
        st.text("Showing Length of Columns")
        st.write(data.shape[1])

    # Show Summary of Dataset
    if st.checkbox("Show Summary of Dataset"):
        st.write(data.describe())

    # Selection of Columns
    species_option = st.selectbox(
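The toggles above work because st.write renders DataFrames, and objects such as data.columns, as tables. A minimal sketch, with a toy DataFrame standing in for the Iris data:

import pandas as pd
import streamlit as st

data = pd.DataFrame({'sepal_length': [5.1, 4.9, 4.7], 'species': ['setosa'] * 3})

if st.checkbox('Show Summary of Dataset'):
    st.write(data.describe())  # summary statistics render as a table
st.write(data.columns)         # an Index renders as a table too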
github streamlit / streamlit / e2e / scripts / empty_dataframes.py
import streamlit as st
import pandas as pd
import numpy as np

st.header("Empty list")
st.write([])

st.header("Empty dataframes")
st.write(np.array(0))
st.write(pd.DataFrame([]))
st.dataframe()
st.dataframe([])
st.dataframe(np.array(0))
st.dataframe(pd.DataFrame([]))

st.header("Empty one-column dataframes")
st.write(np.array([]))
st.dataframe(np.array([]))

st.header("Empty two-column dataframes (only shows 1)")
st.write(pd.DataFrame({"lat": [], "lon": []}))
st.dataframe(pd.DataFrame({"lat": [], "lon": []}))

st.header("Empty tables")
st.table()
st.table([])
st.table(np.array(0))
st.table(pd.DataFrame([]))

st.header("Empty one-column table")
st.table(np.array([]))

st.header("Empty two-column table")
github zacheberhart / Learning-to-Feel / src / app.py
	show_spectros = st.checkbox('Show Spectrograms', value = False)

	# check if a track_id has been entered
	if len(track_id) > 0:
	
		# get track from Spotify API
		track = get_spotify_track(track_id)
		st.subheader('Track Summary')
		st.table(get_track_summmary_df(track))

		# check if there is track preview available from Spotify
		if track['preview_url']:

			# display 30 second track preview
			st.subheader('Track Preview (What the Algorithm "Hears")')
			st.write('')
			preview = get_track_preview(track_id)
			st.audio(preview)

			# get top and bottom labels for the track
			st.subheader('Track Analysis')
			track_df = deepcopy(DF[DF.track_id == track_id].reset_index(drop = True))

			# return values from db if already classified, otherwise classify
			if len(track_df) > 0:
				track_df = deepcopy(track_df.iloc[:,5:].T.rename(columns = {0: 'score'}).sort_values('score', ascending = False))
				st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))
				if show_spectros: generate_spectros(preview)
			else:
				generate_spectros(preview)
				track_df = get_predictions()
				st.table(pd.DataFrame({'Top 5': track_df[:5].index.tolist(), 'Bottom 5': track_df[-5:].index.tolist()}))
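One pattern worth copying here: assemble a small summary DataFrame on the fly and hand it to st.table. A sketch with hypothetical label scores:

import pandas as pd
import streamlit as st

scores = pd.Series({'happy': 0.9, 'calm': 0.7, 'tense': 0.2, 'sad': 0.1})
st.table(pd.DataFrame({'Top 2': scores[:2].index.tolist(),
                       'Bottom 2': scores[-2:].index.tolist()}))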
github awarebayes / RecNN / examples / streamlit_demo.py
        if st.checkbox('Print batch info'):
            st.subheader('State')
            st.write(state)
            st.subheader('Action')
            st.write(action)
            st.subheader('Reward')
            st.write(reward.squeeze())

        st.subheader('(Optional) Select the state you are getting the recommendations for')

        action_id = np.random.randint(0, state.size(0), 1)[0]
        action_id_manual = st.checkbox('Manually set state index')
        if action_id_manual:
            action_id = st.slider("Choose state index:", min_value=0, max_value=state.size(0) - 1)

        st.write('state:', state[action_id])

        algorithm = st.selectbox('Choose an algorithm', ('ddpg', 'td3'))
        metric = st.selectbox('Choose a metric', ('euclidean', 'cosine', 'correlation',
                                                  'canberra', 'minkowski', 'chebyshev',
                                                  'braycurtis', 'cityblock',))
        topk = st.slider("TOP K items to recommend:", min_value=1, max_value=30, value=7)

        dist = {'euclidean': distance.euclidean, 'cosine': distance.cosine,
                'correlation': distance.correlation, 'canberra': distance.canberra,
                'minkowski': distance.minkowski, 'chebyshev': distance.chebyshev,
                'braycurtis': distance.braycurtis, 'cityblock': distance.cityblock}

        action = models[algorithm].forward(state)

        st.markdown('**Recommendations for state with index {}**'.format(action_id))
        st.write(rank(action[action_id].detach().cpu().numpy(), dist[metric], topk))
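As above, st.write doubles as a debugging aid: pass a short label and the tensor in one call. A minimal sketch, assuming a PyTorch state tensor:

import streamlit as st
import torch

state = torch.rand(4, 8)  # hypothetical batch of states
action_id = st.slider('Choose state index:', min_value=0, max_value=state.size(0) - 1)
st.write('state:', state[action_id])  # label and value in one call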
github leenamurgai / debias-ml / source / analysis_oversampling.py
st.write('')

# Set up the Oversampler
oversampler = Oversampler(X_train, y_train, Z_train,
                          target_col, bias_cols, bias_col_types)
oversampler.original_data_stats()
X_new, y_new, Z_new = oversampler.get_oversampled_data()

st.write('Augmented data set: {} samples'.format(X_new.shape[0]))

# Work out how many training points to take from the augmented dataset,
# preserving the original train/test proportion
new_n_train = X_new.shape[0] * n_train / X_all.shape[0]
new_n_train = int(new_n_train - new_n_train % 3)  # round down to a multiple of 3

st.write('')
st.write('**We split our augmented data set into training and test sets:**')

(X_train_new, X_train2_new, X_train1_new, X_test_new,
 y_train_new, y_train2_new, y_train1_new, y_test_new,
 Z_train_new, Z_test_new) = make_training_and_test_sets(X_new, y_new, Z_new, new_n_train)

st.write('Augmented training set: {} samples'.format(X_train_new.shape[0]))
st.write('Augmented test set: {} samples'.format(X_test_new.shape[0]))

################################################################################
################################################################################
################################################################################

st.header('3 Training a 3 layer neural network...')
st.write('')
st.subheader('3.1 ...on all the data')
st.write('')
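Note the recurring st.write('') calls: writing an empty string is a simple way to add vertical spacing between headings. A sketch:

import streamlit as st

st.header('3 Training a 3 layer neural network...')
st.write('')  # empty write used purely as vertical spacing
st.subheader('3.1 ...on all the data')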
github MarcSkovMadsen / awesome-streamlit / gallery / yahoo_finance_app / yahoo_finance_app.py
        period2 = st.sidebar.slider(
            "SMA2 period", min_value=5, max_value=500, value=100, step=1
        )
        data[f"SMA2 {period2}"] = data["Adj Close"].rolling(period2).mean()
        data2[f"SMA2 {period2}"] = data[f"SMA2 {period2}"].reindex(data2.index)

    st.subheader("Chart")
    st.line_chart(data2)

    if st.sidebar.checkbox("View stadistic"):
        st.subheader("Stadistic")
        st.table(data2.describe())

    if st.sidebar.checkbox("View quotes"):
        st.subheader(f"{asset} historical data")
        st.write(data2)

    st.sidebar.title("About")
    st.sidebar.info(
        "This app is a simple example of "
        "using Strealit to create a financial data web app.\n"
github streamlit / streamlit / examples / sharing / benchmark.py
sum_operation = tf.reduce_sum(dot_operation)


startTime = datetime.now()


# Shared GPU has memory sharing issues
# https://www.tensorflow.org/guide/using_gpu
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as session:
    result = session.run(sum_operation)
    st.write(result)

st.write("Shape:", shape, "Device:", device_name)
st.write("Time taken:", datetime.now() - startTime)
github streamlit / streamlit / e2e / scripts / reuse_label.py
if st.button("Rerun test"):
    st.test_run_count = -1

if hasattr(st, "test_run_count"):
    st.test_run_count += 1
else:
    st.test_run_count = 0 if st.get_option("server.headless") else -1

if st.test_run_count < 1:
    w1 = st.slider("label", 0, 100, 25, 1)
else:
    w1 = st.selectbox("label", ("m", "f"), 1)

st.write("value 1:", w1)
st.write("test_run_count:", st.test_run_count)
st.write(
    """
    If this is failing locally, it could be because you have a browser with