"bio({""constraints"":[],""type"":""STRING"",""dataSize"":500})","birthday({""constraints"":[],""type"":""DATETIME""})","chattiness({""constraints"":[],""type"":""INT"",""defaultValue"":""1""})","firstName({""constraints"":[],""type"":""STRING"",""dataSize"":250})","gender({""constraints"":[],""type"":""BOOLEAN"",""dataSize"":500})","image({""constraints"":[],""type"":""FILE_REF"",""dataSize"":500,""defaultValue"":""""})","lastName({""constraints"":[],""type"":""STRING"",""dataSize"":500})","token({""constraints"":[],""type"":""STRING"",""dataSize"":500,""dynamicProps"":{""localData"":{}}})","objectId({""constraints"":[],""type"":""STRING_ID"",""dataSize"":36})","ownerId({""constraints"":[],""type"":""STRING"",""dataSize"":36})","created({""constraints"":[],""type"":""DATETIME""})","updated({""constraints"":[],""type"":""DATETIME""})","countryInformations({""constraints"":[],""type"":""RELATION"",""autoLoad"":false,""relatedTable"":""countryInformation""})","statistics({""constraints"":[""UQ""],""type"":""RELATION"",""autoLoad"":false,""relatedTable"":""userStatistics""})"
I got this CSV when I exported the Person table from Backendless.com. I have never seen such a CSV format. Does this structure have a proper name that I can look up on the internet? Is there any software that can read it and import it into a MySQL database accordingly?
CodePudding user response:
I'm going to withdraw my objection to calling this a CSV file. That's exactly what it is. This doesn't do the whole job, but maybe this will give you a start. This assumes there is only one line in the file; if you have multiple tables in separate files, then you'd need a loop instead of using next()
on the csv.reader.
import json
import csv

# Each field in the Backendless export looks like: name({...json...}).
# Read the single CSV row, then split each cell on the first '(' and parse
# the remainder (minus the trailing ')') as JSON.
# Use a context manager so the file handle is closed deterministically
# (the original left the file open).
with open('x.txt') as fh:
    row = next(csv.reader(fh))
for f in row:
    column, _, rest = f.partition('(')
    data = json.loads(rest[:-1])
    print(column, data)
Now you can print data['type']
and data['dataSize']
. There are still some awkward things here. It shows fields with relations to other tables, but it doesn't say what type those fields are.
CodePudding user response:
Based on the code provided by Tim Roberts, I wrote a script that converts a Backendless CSV schema to a MySQL 8.0 schema:
import json
import csv
from os import listdir
import mysql.connector
# --- MySQL connection settings ---
host = "host"
user = "admin"
password = "admin"
database = "mydb"

# When True, all tables with the same names are dropped before new ones are created.
drop_existing_tables = False

# NOTE(review): connects eagerly at import time; this script fails here if the
# server is unreachable.
mydb = mysql.connector.connect(
    host=host,
    user=user,
    password=password,
    database=database
)

# Every *.csv file in the current directory is treated as one exported table schema.
path = "./"
filenames = listdir(path)
files = [filename for filename in filenames if filename.endswith(".csv")]

# Select the target database.
commands = "USE " + database + ";\n"
# Disable FK checks so a table can reference another table created later in the
# script.  (The original re-assigned `commands` here, losing the USE statement.)
commands += "SET foreign_key_checks = 0;\n"
def constraints_to_string(constraints, column=""):
    """Translate Backendless constraint codes into a MySQL column-constraint string.

    Args:
        constraints: list of Backendless codes ("NN", "UQ", "IDX", ...), or
            None/empty when the field has no constraints.
        column: column name used for the INDEX clause when "IDX" is present.
            (The original read an undefined global here; passing it explicitly
            is backward-compatible via the default.)

    Returns:
        "" when there are no constraints; otherwise a string such as
        " NOT NULL UNIQUE", with an ",\\n INDEX (`col`)" clause appended
        when "IDX" is present.
    """
    if not constraints:  # also guards None from data.get("constraints")
        return ""
    stringConstraints = ""
    addIndex = False
    for constraint in constraints:
        if constraint == "NN":
            constraint = "NOT NULL"
        if constraint == "UQ":
            constraint = "UNIQUE"
        if constraint == "IDX":
            # IDX is not inline column syntax; emit a separate INDEX clause below.
            # (The original stopped accumulating all constraints once IDX was seen.)
            addIndex = True
            continue
        stringConstraints += " " + str(constraint)
    if addIndex:
        stringConstraints += ",\n INDEX (`" + column + "`)"
    return stringConstraints
def map_data_type(data, column, constraints):
    """Map one Backendless field description to the MySQL column-definition tail.

    Args:
        data: parsed JSON dict for the field (keys: "type", and optionally
            "dataSize", "relatedTable", ...).
        column: the column name ("ownerId" is special-cased).
        constraints: pre-rendered constraint string from constraints_to_string().

    Returns:
        The SQL fragment that follows the column name, e.g. "VARCHAR(500) NOT NULL".
    """
    dataType = data["type"]
    if column == "ownerId":
        # ownerId always references the users table's primary key.
        dataType = "BINARY(16) " + constraints + ", " + \
            " FOREIGN KEY (`" + column + "`) REFERENCES `users`(`objectId`)"
    elif dataType == "STRING":
        dataSize = data.get("dataSize")
        dataType = "VARCHAR(" + str(dataSize) + ") " + constraints
    elif dataType == "STRING_ID":
        dataType = "BINARY(16) NOT NULL PRIMARY KEY " + constraints
    elif dataType == "RELATION":
        # NOTE(review): assumes every related table uses `objectId` as its PK —
        # confirm against the exported schemas.
        dataType = "BINARY(16) " + constraints + ", " + \
            "FOREIGN KEY (`" + column + "`) REFERENCES `" + data["relatedTable"] + "`(`objectId`)"
    elif dataType == "FILE_REF":
        dataType = "VARCHAR(256) " + constraints
    else:
        # Bug fix: the original dropped the type entirely in this branch, so
        # DATETIME/INT/BOOLEAN columns had no type.  Those Backendless type
        # names are valid MySQL types as-is.
        dataType = dataType + " " + constraints
    return dataType
# Optionally drop existing tables before recreating them.
if drop_existing_tables:
    for file in files:
        # Table name = file name without its extension.
        tableName = file.rpartition('.')[0]
        # Append (the original re-assigned `commands`, discarding prior statements).
        commands += "DROP TABLE IF EXISTS " + tableName + ";\n"
    commands += "\n"
# Generate CREATE TABLE statements from each Backendless schema CSV file.
for file in files:
    # Each export is a single CSV row of `name({...json...})` cells.
    with open(file) as fh:  # close the handle (original leaked it)
        row = next(csv.reader(fh))
    tableName = file.rpartition('.')[0]
    variablesString = ""
    first = True
    for f in row:
        # Split `column({...json...})` into the name and its JSON description.
        column, _, rest = f.partition('(')
        data = json.loads(rest[:-1])  # rest[:-1] drops the trailing ')'
        constraints = constraints_to_string(data.get("constraints"))
        dataType = map_data_type(data, column, constraints)
        if not first:
            variablesString += ", \n"
        variablesString += "`" + column + "`" + " " + dataType
        first = False
    commands += "CREATE TABLE IF NOT EXISTS " + tableName + " (" + variablesString + ");\n\n"
# Show the generated script, then ask for confirmation BEFORE executing it.
# (The original executed first and committed only when the answer was NOT "y" —
# both the ordering and the condition were inverted.)
print(commands)
if input("are you sure you want to execute the SQL script above? (y/n)") == "y":
    mycursor = mydb.cursor()
    # With multi=True, execute() returns an iterator of per-statement results;
    # consume it so every statement is actually sent to the server.
    for _ in mycursor.execute(commands, multi=True):
        pass
    mydb.commit()