python – How to convert .json to .ipynb?

Similar to this question, which contains answers that don't work. Here's a notebook taken from this dataset that I'm trying to convert (abridged; the full file has 58 cells, all in this same shape):

{"cell_type":{"1862f0a6":"code","2a9e43d6":"code","038b763d":"code","2eefe0ef":"code","0beab1cd":"code","9a78ab76":"code","ebe125d5":"code","d9dced8b":"code","86497fe1":"code","e2c8e725":"code","ff7c44ed":"code","0e7c906e":"code","dd0c804a":"code","781bbf3c":"code","bd94f005":"code","62638fba":"code","bb69e88c":"code","6b5664c7":"code","23783525":"code","8522781a":"code","8ca8392c":"code","17ec3fc4":"code","76512d50":"code","a98c5d9f":"code","06365725":"code","59959af5":"code","80151ab7":"code","5bf9ca51":"code","f5504853":"code","9f50dca0":"code","21616367":"markdown","fcb6792d":"markdown","63c26fa2":"markdown","4bb2e30a":"markdown","a6357f7e":"markdown","45082c89":"markdown","77e56113":"markdown","448eb224":"markdown","032e2820":"markdown","8554b284":"markdown","36002912":"markdown","ac301a84":"markdown","23705731":"markdown","1496beaf":"markdown","2e1a5949":"markdown","7e2f170a":"markdown","bfbde93e":"markdown","0d136e08":"markdown","915643b3":"markdown","8ffe0b25":"markdown","8a4c95d1":"markdown","b69a4f9b":"markdown","c3ce0945":"markdown","3eebeb87":"markdown","1ae087ab":"markdown","aaad8355":"markdown","503926eb":"markdown","3e5f860d":"markdown"},"source":{"1862f0a6":"# This Python 3 environment comes with many helpful analytics libraries installedn# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-pythonn# For example, here's several helpful packages to loadnnimport numpy as np # linear algebranimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)nn# Input data files are available in the read-only "../input/" directoryn# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directorynnimport osnfor dirname, _, filenames in os.walk('/kaggle/input'):n    for filename in filenames:n        print(os.path.join(dirname, filename))nn# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","2a9e43d6":"import numpy as npnimport pandas as pdnimport randomnnfrom sklearn.model_selection import train_test_split, cross_val_scorenfrom sklearn.preprocessing import StandardScaler, RobustScalernfrom catboost import CatBoostRegressornfrom sklearn.ensemble import RandomForestRegressornfrom sklearn.metrics import r2_score as r2nfrom sklearn.model_selection import KFold, GridSearchCVnnfrom datetime import datetimennimport matplotlibnimport matplotlib.pyplot as pltnimport seaborn as snsn%matplotlib inline","038b763d":"import warningsnwarnings.filterwarnings('ignore')","2eefe0ef":"matplotlib.rcParams.update({'font.size': 14})","0beab1cd":"def evaluate_preds(train_true_values, train_pred_values, test_true_values, test_pred_values):n    print("Train R2:\t" + str(round(r2(train_true_values, train_pred_values), 3)))n    print("Test R2:\t" + str(round(r2(test_true_values, test_pred_values), 3)))n    n    plt.figure(figsize=(18,10))n    n    plt.subplot(121)n    sns.scatterplot(x=train_pred_values, y=train_true_values)n    plt.xlabel('Predicted values')n    plt.ylabel('True values')n    plt.title('Train sample prediction')n    n    plt.subplot(122)n    sns.scatterplot(x=test_pred_values, y=test_true_values)n    plt.xlabel('Predicted values')n    plt.ylabel('True values')n    plt.title('Test sample prediction')nn    plt.show()","9a78ab76":"TRAIN_DATASET_PATH = 
'/kaggle/input/real-estate-price-prediction-moscow/train.csv'nTEST_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/test.csv'","ebe125d5":"train_df = pd.read_csv(TRAIN_DATASET_PATH)ntrain_df.tail()","d9dced8b":"train_df.dtypes","86497fe1":"num_feat = list(train_df.select_dtypes(exclude="object").columns)nobj_feat = list(train_df.select_dtypes(include="object").columns)ntarget="Price"nnnum_feat","e2c8e725":"test_df = pd.read_csv(TEST_DATASET_PATH)ntest_df.tail()","ff7c44ed":"print('u0421u0442u0440u043eu043a u0432 u0442u0440u0435u0439u043du0435:', train_df.shape[0])nprint('u0421u0442u0440u043eu043a u0432 u0442u0435u0441u0442u0435', test_df.shape[0])","0e7c906e":"train_df.shape[1] - 1 == test_df.shape[1]","dd0c804a":"submission_df = pd.read_csv('/kaggle/input/real-estate-price-prediction-moscow/sample_submission.csv')","781bbf3c":"train_df['Id'] = train_df['Id'].astype(str)","bd94f005":"train_df[num_feat].hist(n    figsize=(16,16)n)nplt.show()","62638fba":"train_df.describe().T","bb69e88c":"grid = sns.jointplot(train_df['Rooms'], train_df['Price'], kind='reg')ngrid.fig.set_figwidth(8)ngrid.fig.set_figheight(8)","6b5664c7":"grid = sns.jointplot(train_df['KitchenSquare'], train_df['Price'], kind='reg')ngrid.fig.set_figwidth(8)ngrid.fig.set_figheight(8)","23783525":"train_df_temp = train_df.loc[train_df['KitchenSquare']<250]ngrid = sns.jointplot(train_df_temp['KitchenSquare'], train_df_temp['Price'], kind='reg')ngrid.fig.set_figwidth(8)ngrid.fig.set_figheight(8)","8522781a":"plt.figure(figsize = (16, 8))nntrain_df['Price'].hist(bins=30)nplt.ylabel('Count')nplt.xlabel('Price')nnplt.title('Target distribution')nplt.show()","8ca8392c":"correlation = train_df.corrwith(train_df['Price']).sort_values(ascending=False)ncorrelation.drop('Price', inplace=True)nnplt.figure(figsize = (16, 8))nplt.bar(correlation.index, correlation)nplt.xticks(rotation='90')nplt.xlabel('Features', fontsize=15)nplt.ylabel('Correlation', fontsize=15)nplt.title('Feature correlation', fontsize=15)nplt.show()","17ec3fc4":"class Data:n    n    def __init__(self):n        """u041au043eu043du0441u0442u0430u043du0442u044b u0434u043bu044f u043eu0431u0440u0430u0431u043eu0442u043au0438 u0432u044bu0431u0440u043eu0441u043eu0432 u043du0430 u043eu0441u043du043eu0432u0435 u0430u043du0430u043bu0438u0437u0430 u0434u0430u043du043du044bu0445"""n        self.Square_min = 15n        self.Square_max = 300n        n        self.LifeSquare_min = 10n        self.LifeSquare_max = 280n        n        self.Rooms_min = 1n        self.Rooms_max = 5n        n        self.HouseFloor_min = 1n        self.HouseFloor_max = 50n        n        self.KitchenSquare_min = 3n        self.KitchenSquare_max = 30n        n        self.current_year = datetime.now().yearn        n        self.medians = Nonen        self.DistrictId_value_counts = Nonen        self.SquareMeterPrice_by_DistrictId = Nonen        self.Healthcare_1_by_DistrictId = Nonen        n        n    def fit(self, train_df):n        n        # u043cu0435u0434u0438u0430u043du043du044bu0435 u0437u043du0430u0447u0435u043du0438u044fn        self.medians = train_df[['LifeSquare', 'HouseFloor']].median()n        n        # u043fu043eu0434u0441u0447u0435u0442 u043fu043eu043fu0443u043bu044fu0440u043du044bu0445 u0440u0430u0439u043eu043du043eu0432n        self.DistrictId_value_counts = dict(train_df['DistrictId'].value_counts())n        n        # u043fu043eu0434u0441u0447u0435u0442 u0441u0440u0435u0434u043du0435u0439 u0446u0435u043du044b u0437u0430 u043c2 u043fu043e 
u0440u0430u0439u043eu043du0443n        train_df_temp = train_df.loc[((train_df['Square'] > self.Square_min) & (train_df['Square'] < self.Square_max))]n        train_df_temp["SquareMeterPrice"] = train_df_temp["Price"] / train_df_temp["Square"]n        self.SquareMeterPrice_by_DistrictId = train_df_temp.groupby('DistrictId', as_index=False)\n            .agg({'SquareMeterPrice': 'mean'})\n            .rename(columns={'SquareMeterPrice': 'AverageSquareMeterPrice'})n        n        # u043fu043eu0434u0441u0447u0435u0442 u0441u0440u0435u0434u043du0435u0433u043e u0437u043du0430u0447u0435u043du0438u044f u043fu0440u0438u0437u043du0430u043au0430 Healthcare_1 u043fu043e u0440u0430u0439u043eu043du0443n        self.Healthcare_1_by_DistrictId = train_df.groupby('DistrictId', as_index=False)\n            .agg({'Healthcare_1': 'mean'})\n            .rename(columns={'Healthcare_1': 'AverageHealthcare_1'})n        n        del train_df_tempn        n    def transform(self, train_df):n        n        # u041eu0431u0440u0430u0431u043eu0442u043au0430 u043fu0440u043eu043fu0443u0441u043au043eu0432n        train_df[['LifeSquare', 'HouseFloor']] = train_df[['LifeSquare', 'HouseFloor']].fillna(self.medians)n        n        # u041eu0431u0440u0430u0431u043eu0442u043au0430 u0432u044bu0431u0440u043eu0441u043eu0432n        n        # u043fu043bu043eu0449u0430u0434u044cn        train_df.loc[(train_df['Square'] > self.Square_max), 'Square'] = self.Square_maxn        train_df.loc[(train_df['Square'] < self.Square_min), 'Square'] = self.Square_minn        n        # u0436u0438u043bu0430u044f u043fu043bu043eu0449u0430u0434u044cn        train_df.loc[(train_df['LifeSquare'] < self.LifeSquare_min), 'LifeSquare'] = self.LifeSquare_minn        train_df.loc[(train_df['LifeSquare'] > self.LifeSquare_max), 'LifeSquare'] = self.LifeSquare_maxn        n        # u043fu043bu043eu0449u0430u0434u044c u043au0443u0445u043du0438n        train_df.loc[(train_df['KitchenSquare'] < self.KitchenSquare_min), 'KitchenSquare'] = self.KitchenSquare_minn        train_df.loc[(train_df['KitchenSquare'] > self.KitchenSquare_max), 'KitchenSquare'] = self.KitchenSquare_maxn        n        # u0433u043eu0434 u043fu043eu0441u0442u0440u043eu0439u043au0438 u0434u043eu043cu0430n        train_df.loc[(train_df['HouseYear'] > self.current_year), 'HouseYear'] = self.current_yearn        n        # u043au043eu043bu0438u0447u0435u0441u0442u0432u043e u043au043eu043cu043du0430u0442n        train_df.loc[(train_df['Rooms'] > self.Rooms_max), 'Rooms'] = self.Rooms_maxn        train_df.loc[(train_df['Rooms'] < self.Rooms_min), 'Rooms'] = self.Rooms_minn        n        # u043au043eu043bu0438u0447u0435u0441u0442u0432u043e u044du0442u0430u0436u0435u0439n        train_df.loc[(train_df['HouseFloor'] < self.HouseFloor_min), 'HouseFloor'] = self.HouseFloor_minn        train_df.loc[(train_df['HouseFloor'] > self.HouseFloor_max), 'HouseFloor'] = self.HouseFloor_maxn        n        # u0435u0441u043bu0438 u044du0442u0430u0436 u0431u043eu043bu044cu0448u0435 u044du0442u0430u0436u043du043eu0441u0442u0438 u0434u043eu043cu0430, u0442u043e u043fu0440u0438u0441u0432u0430u0438u0432u0430u0435u043c u0441u043bu0443u0447u0430u0439u043du044bu0439 u044du0442u0430u0436 u043eu0442 self.HouseFloor_min u0434u043e u043cu0430u043au0441u0438u043cu0430u043bu044cu043du043eu0433u043e u044du0442u0430u0436u0430 u0432 u0434u043eu043cu0435n        floor_outliers = train_df.loc[train_df['Floor'] > train_df['HouseFloor']].indexn        train_df.loc[floor_outliers, 'Floor'] = train_df.loc[floor_outliers, 
'HouseFloor'].apply(lambda x: self.HouseFloor_min if (self.HouseFloor_min == x) else np.random.randint(self.HouseFloor_min, x))n        n        # u041eu0431u0440u0430u0431u043eu0442u043au0430 u043au0430u0442u0435u0433u043eu0440u0438u0439n        train_df = pd.concat([train_df, pd.get_dummies(train_df['Ecology_2'], prefix='Ecology_2', dtype="int8")], axis=1)n        train_df = pd.concat([train_df, pd.get_dummies(train_df['Ecology_3'], prefix='Ecology_3', dtype="int8")], axis=1)n        train_df = pd.concat([train_df, pd.get_dummies(train_df['Shops_2'], prefix='Shops_2', dtype="int8")], axis=1)n        n        return train_dfn    n    def features(self, train_df):n        n        # u0434u043eu0431u0430u0432u043bu0435u043du0438u0435 u043fu0440u0438u0437u043du0430u043au0430 u043fu043eu043fu0443u043bu044fu0440u043du043eu0441u0442u0438 u0440u0430u0439u043eu043du0430n        train_df['DistrictId_counts'] = train_df['DistrictId'].map(self.DistrictId_value_counts)n        train_df['DistrictId_counts'].fillna(train_df['DistrictId_counts'].median(), inplace=True)n        n        # u0434u043eu0431u0430u0432u043bu0435u043du0438u0435 u043fu0440u0438u0437u043du0430u043au0430 u0441u0440u0435u0434u043du0435u0439 u0441u0442u043eu0438u043cu043eu0441u0442u0438 u043c2 u043fu043e u0440u0430u0439u043eu043du0443n        train_df = train_df.merge(self.SquareMeterPrice_by_DistrictId, on=["DistrictId"], how='left')n        train_df['AverageSquareMeterPrice'].fillna(train_df['AverageSquareMeterPrice'].median(), inplace=True)n        n        # u0434u043eu0431u0430u0432u043bu0435u043du0438u0435 u043fu0440u0438u0437u043du0430u043au0430 u0441u0440u0435u0434u043du0435u0433u043e u0437u043du0430u0447u0435u043du0438u044f Healthcare_1 u043fu043e u0440u0430u0439u043eu043du0443n        train_df = train_df.merge(self.Healthcare_1_by_DistrictId, on=["DistrictId"], how='left')n        train_df['AverageHealthcare_1'].fillna(train_df['AverageHealthcare_1'].median(), inplace=True)n        n        return train_df","76512d50":"data_inst = Data()nn# u0442u0440u0435u043du0438u0440u043eu0432u043eu0447u043du044bu0435 u0434u0430u043du043du044bu0435ndata_inst.fit(train_df)ntrain_df = data_inst.transform(train_df)ntrain_df = data_inst.features(train_df)nn# u0432u0430u043bu0438u0434u0430u0446u0438u043eu043du043du044bu0435 u0434u0430u043du043du044bu0435ntest_df = data_inst.transform(test_df)ntest_df = data_inst.features(test_df)","a98c5d9f":"feature_names = ['AverageSquareMeterPrice', 'DistrictId_counts', 'Rooms', 'Square', 'LifeSquare', 'KitchenSquare', 'Floor',n                    'HouseFloor', 'HouseYear', 'Helthcare_2', 'Ecology_1', 'Social_1', 'Social_2', 'Social_3',n                    'Shops_1', 'Ecology_2_A', 'Ecology_2_B', 'Ecology_3_A', 'Ecology_3_B', 'Shops_2_A', 'Shops_2_B',n                    'AverageHealthcare_1']ntarget_name="Price"","06365725":"train_df = train_df[feature_names + [target_name]]ntest_df = test_df[feature_names + ['Id']]nX = train_df[feature_names]ny = train_df[target_name]","59959af5":"final_model = CatBoostRegressor(n    silent=True,n    learning_rate=0.1,n    iterations=1150,n    eval_metric="R2",n    depth=8n)nnfinal_model.fit(X, y)nncv_score = cross_val_score(n    final_model,n    X,n    y,n    scoring='r2',n    cv=KFold(n            n_splits=5,n            shuffle=True,n            random_state=42n    )n)","80151ab7":"print(f'R2: {round(cv_score.mean(), 3)}')","5bf9ca51":"feature_importances = pd.DataFrame(n    zip(X.columns, final_model.get_feature_importance()),n    columns=['feature_name', 
'importance']n)nnfeature_importances.sort_values(by='importance', ascending=False, inplace=True)nfeature_importances.head(20)","f5504853":"preds_final = pd.DataFrame()npreds_final['Id'] = test_df['Id'].copy()nntest_df.set_index('Id', inplace=True)ntest_df = test_df[feature_names]","9f50dca0":"y_pred_final = final_model.predict(test_df)nnsubmission_df['Price'] = y_pred_finalnsubmission_df.to_csv('./predictions.csv', index=False, encoding='utf-8', sep=',')nnsubmission_df.head()","21616367":"*u0414u0435u043bu0435u043du0438u0435 u043fu0440u0438u0437u043du0430u043au043eu0432 u043du0430 u0447u0438u0441u043bu043eu0432u044bu0435 u0438 u0442u0435u043au0441u0442u043eu0432u044bu0435*","fcb6792d":"**u0421u043eu0440u0442u0438u0440u043eu0432u043au0430 u043fu0440u0438u0437u043du0430u043au043eu0432 u043fu043e u0432u0430u0436u043du043eu0441u0442u0438**","63c26fa2":"u0412u044bu0431u0440u043eu0441u044b u043du0430u0431u043bu044eu0434u0430u044eu0442u0441u044f u0432: HouseYear, KitchenSquare.nnu041fu0440u0438u0437u043du0430u043au0438 u0441 u0430u043du043eu043cu0430u043bu044cu043du043e u0432u044bu0441u043eu043au0438u043c u0437u043du0430u0447u0435u043du0438u0435u043c, u043au043eu0442u043eu0440u044bu0435 u043du0443u0436u043du043e u0431u0443u0434u0435u0442 u043eu0433u0440u0430u043du0438u0447u0438u0442u044c: HouseFloor, LifeSquare, Rooms, Square.","4bb2e30a":"**u041fu043eu0438u0441u043a u043fu0440u0438u0437u043du0430u043au043eu0432 u0441 u0432u044bu0431u0440u043eu0441u0430u043cu0438**","a6357f7e":"*u0412u044bu0432u043eu0434u0438u043c u0441u043au043eu043bu044cu043au043e u0441u0442u0440u043eu043a u0432 u0442u0435u0441u0442u0435 u0438 u043du0430 u0442u0440u0435u0439u043du0435*","45082c89":"**u041fu0440u0438u0432u0435u0434u0435u043du0438u0435 u0442u0438u043fu043eu0432**","77e56113":"**u0423u0441u0442u0430u043du0430u0432u043bu0438u0432u0430u0435u043c u0437u043du0430u0447u0435u043du0438u044f, u0447u0442u043eu0431u044b u0432u0435u0437u0434u0435 u0431u044bu043b u043eu0434u0438u043du0430u043au043eu0432u044bu0439 u0448u0440u0438u0444u0442 u0438 u0440u0430u0437u043cu0435u0440**","448eb224":"**u0418u043cu043fu043eu0440u0442u0438u0440u0443u0435u043c u043du0435u043eu0431u0445u043eu0434u0438u043cu044bu0435 u0434u043bu044f u0440u0430u0431u043eu0442u044b u0444u0443u043du043au0446u0438u0438 u0438 u043au043bu0430u0441u0441u044b**","032e2820":"u0421u043eu0437u0434u0430u0435u043c u0441u043fu0438u0441u043eu043a u043fu0440u0438u0437u043du0430u043au043eu0432, u0438u0441u043fu043eu043bu044cu0437u0443u0435u043cu044bu0445 u0432 u043cu043eu0434u0435u043bu0438 - u043eu0442u0431u043eu0440 u043fu0440u0438u0437u043du0430u043au043eu0432","8554b284":"**u041eu0431u0443u0447u0435u043du0438u0435 u043cu043eu0434u0435u043bu0438 u043du0430 CatBoostRegressor**nnu0412u044bu0447u0438u0441u043bu0435u043du0438u044f u0433u0438u043fu0435u0440u043fu0430u0440u0430u043cu0435u0442u0440u043eu0432 u043cu043eu0434u0435u043bu0438 u043fu0440u0438 u043fu043eu043cu043eu0449u0438 randomized_search() learning_rate=0.1 iterations=1150 depth=8","36002912":"u0417u0430 u0432u044bu0431u0440u043eu0441 u043fu043eu0441u0447u0438u0442u0430u0435u043c u0437u043du0430u0447u0435u043du0438u044f u043cu0435u043du0435u0435 3 u043au0432.u043c. 
u0438 u0431u043eu043bu044cu0448u0435 30 u043au0432.u043c.","ac301a84":"*u041du0430 u043eu0431u0443u0447u0435u043du0438u0438 u043du0430 u043eu0434u0438u043d u043fu0440u0438u0437u043du0430u043a u0431u043eu043bu044cu0448u0435, u0447u0435u043c u043du0430 u0442u0435u0441u0442u0435*","23705731":"**u0421u0447u0438u0442u044bu0432u0430u0435u043c u043eu0431u0443u0447u0430u044eu0449u0438u0439 u043du0430u0431u043eu0440 u0434u0430u043du043du044bu0445**","1496beaf":"u041au043eu0440u0440u0435u043bu044fu0446u0438u044f","2e1a5949":"u041eu0446u0435u043du043au0430 u043cu043eu0434u0435u043bu0438","7e2f170a":"**u041fu043eu0434u043au043bu044eu0447u0430u0435u043c u043fu0440u0435u0434u0443u043fu0440u0435u0436u0434u0435u043du0438u044f**","bfbde93e":"**u0413u0440u0430u0444u0438u043a u0440u0430u0441u043fu0440u0435u0434u0435u043bu0435u043du0438u044f u0446u0435u043bu0435u0432u043eu0439 u043fu0435u0440u0435u043cu0435u043du043du043eu0439 - u0446u0435u043du044b**","0d136e08":"**u0417u0430u0433u0440u0443u0437u043au0430 u0434u0430u043du043du044bu0445**","915643b3":"**u0421u043eu0437u0434u0430u043du0438u0435 u0434u0430u0442u0430u0444u0440u0435u0439u043cu0430 u0441 u043fu0440u0435u0434u0441u043au0430u0437u0430u043du0438u044fu043cu0438**","8ffe0b25":"**u0423u043au0430u0437u044bu0432u0430u0435u043c u043fu0443u0442u044c u043a u0444u0430u0439u043bu0430u043c u0441 u0434u0430u043du043du044bu043cu0438**","8a4c95d1":"*u041eu043fu0438u0441u0430u043du0438u0435 u0434u0430u0442u0430u0441u0435u0442u0430*nn**Id** - u0438u0434u0435u043du0442u0438u0444u0438u043au0430u0446u0438u043eu043du043du044bu0439 u043du043eu043cu0435u0440 u043au0432u0430u0440u0442u0438u0440u044bnn**DistrictId** - u0438u0434u0435u043du0442u0438u0444u0438u043au0430u0446u0438u043eu043du043du044bu0439 u043du043eu043cu0435u0440 u0440u0430u0439u043eu043du0430nn**Rooms** - u043au043eu043bu0438u0447u0435u0441u0442u0432u043e u043au043eu043cu043du0430u0442nn**Square** - u043fu043bu043eu0449u0430u0434u044cnn**LifeSquare** - u0436u0438u043bu0430u044f u043fu043bu043eu0449u0430u0434u044cnn**KitchenSquare** - u043fu043bu043eu0449u0430u0434u044c u043au0443u0445u043du0438nn**Floor** - u044du0442u0430u0436nn**HouseFloor** - u043au043eu043bu0438u0447u0435u0441u0442u0432u043e u044du0442u0430u0436u0435u0439 u0432 u0434u043eu043cu0435nn**HouseYear** - u0433u043eu0434 u043fu043eu0441u0442u0440u043eu0439u043au0438 u0434u043eu043cu0430nn**Ecology_1, Ecology_2, Ecology_3** - u044du043au043eu043bu043eu0433u0438u0447u0435u0441u043au0438u0435 u043fu043eu043au0430u0437u0430u0442u0435u043bu0438 u043cu0435u0441u0442u043du043eu0441u0442u0438nn**Social_1, Social_2, Social_3** - u0441u043eu0446u0438u0430u043bu044cu043du044bu0435 u043fu043eu043au0430u0437u0430u0442u0435u043bu0438 u043cu0435u0441u0442u043du043eu0441u0442u0438nn**Healthcare_1, Helthcare_2** - u043fu043eu043au0430u0437u0430u0442u0435u043bu0438 u043cu0435u0441u0442u043du043eu0441u0442u0438, u0441u0432u044fu0437u0430u043du043du044bu0435 u0441 u043eu0445u0440u0430u043du043eu0439 u0437u0434u043eu0440u043eu0432u044cu044fnn**Shops_1, Shops_2** - u043fu043eu043au0430u0437u0430u0442u0435u043bu0438, u0441u0432u044fu0437u0430u043du043du044bu0435 u0441 u043du0430u043bu0438u0447u0438u0435u043c u043cu0430u0433u0430u0437u0438u043du043eu0432, u0442u043eu0440u0433u043eu0432u044bu0445 u0446u0435u043du0442u0440u043eu0432nn**Price** - u0446u0435u043du0430 u043au0432u0430u0440u0442u0438u0440u044b","b69a4f9b":"u0421u043eu0437u0434u0430u043du0438u044f u043au043bu0430u0441u0441u0430 u043fu043eu0434u0433u043eu0442u043eu0432u043au0438 
u0434u0430u043du043du044bu0445","c3ce0945":"**u0421u0447u0438u0442u044bu0432u0430u0435u043c u0442u0435u0441u0442u043eu0432u044bu0439 u043du0430u0431u043eu0440 u0434u0430u043du043du044bu0445**","3eebeb87":"u0417u043du0430u0447u0435u043du0438u044f u043cu0435u043du044cu0448u0435 1 u0438 u0431u043eu043bu044cu0448u0435 250 u043eu0442u0441u0435u043au0430u0435u043c","1ae087ab":"**u0417u0430u0434u0430u0435u043c u0444u0443u043du043au0446u0438u044e u0434u043bu044f u043fu043eu0434u0441u0447u0435u0442u0430 u043cu0435u0442u0440u0438u043a**","aaad8355":"*u0422u0438u043f u0434u0430u043du043du044bu0445 u043eu0431u0443u0447u0430u044eu0449u0435u0433u043e u0441u0435u0442u0430*","503926eb":"u0418u043du0438u0446u0438u0430u043bu0438u0437u0430u0446u0438u044f u043au043bu0430u0441u0441u0430 Data","3e5f860d":"u041fu0440u0438u0437u043du0430u043au0438 Rooms, KitchenSquare, HouseFloor u0438u043cu0435u044eu0442 u0432 u043du0435u043au043eu0442u043eu0440u044bu0445 u043du0430u0431u043bu044eu0434u0435u043du0438u044fu0445 u043du0443u043bu0435u0432u044bu0435 u0437u043du0430u0447u0435u043du0438u044f"}}
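For what it's worth, this doesn't look like the Jupyter notebook schema at all. It looks like a pandas DataFrame with `cell_type` and `source` columns that was serialized with `DataFrame.to_json()` in its default `columns` orientation, i.e. `{column: {row_id: value}}`. At least it loads back cleanly as a frame; a minimal check, assuming the file is saved locally as `notebook.json`:

```python
import pandas as pd

# Column-oriented JSON ({column -> {cell_id -> value}}) reads straight into a frame
df = pd.read_json("notebook.json", orient="columns")

print(df.shape)    # expected: (n_cells, 2)
print(df.columns)  # expected: Index(['cell_type', 'source'], dtype='object')
print(df.head())
```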

I tried renaming it from notebook.json to notebook.ipynb, but I get an error in Jupyter:

```
Unreadable Notebook: notebook.ipynb <ValidationError: "Notebook could not be converted from version 1 to version 2 because it's missing a key: cells">
```
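From what I can tell, a valid .ipynb is JSON with top-level `cells`, `metadata`, `nbformat` and `nbformat_minor` keys. Since my file has none of them, Jupyter falls back to treating it as a version 1 notebook (hence the 'version 1 to version 2' wording) and the upgrade fails on the missing `cells` list. So presumably I have to rebuild the notebook myself. Is something like the following sketch with `nbformat` the right approach? It assumes the file really carries nothing but `cell_type` and `source`; also, the export lists all code cells before all markdown cells, so I suspect the original cell order is lost either way.

```python
import json

import nbformat
from nbformat.v4 import new_code_cell, new_markdown_cell, new_notebook

with open("notebook.json") as f:
    data = json.load(f)  # {"cell_type": {id: ...}, "source": {id: ...}}

# Rebuild one nbformat cell per id, in the order the ids appear in the file
cells = []
for cell_id, cell_type in data["cell_type"].items():
    source = data["source"][cell_id]
    if cell_type == "code":
        cells.append(new_code_cell(source))
    elif cell_type == "markdown":
        cells.append(new_markdown_cell(source))
    else:
        raise ValueError(f"unexpected cell_type {cell_type!r} for cell {cell_id}")

nb = new_notebook(cells=cells)
nbformat.validate(nb)  # raises if the result is not a valid v4 notebook
nbformat.write(nb, "converted.ipynb")
```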
