Commit dfaa5e39 authored by Sam Thursfield

Merge branch 'python3-port' into 'master'

generators: Port to python3

See merge request GNOME/tracker!60
parents 0762e076 9fc1e181
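
The diff below applies the same small set of Python 3 idioms to every generator script: print becomes a function, xrange becomes range, cPickle and ConfigParser take their Python 3 names, except clauses use "as", and dictionary views are wrapped in list() where an indexable sequence is needed. A short self-contained sketch of those idioms (the counts dictionary is invented for illustration):

    import configparser   # Python 2 spelling: ConfigParser
    import pickle         # Python 2 spelling: cPickle
    import random

    counts = {"contacts": 20, "emails": 50}        # hypothetical sample data
    config = configparser.ConfigParser()           # module renamed from ConfigParser

    # print() is a function and xrange() is gone; range() is already lazy.
    for contact in range(1, counts["contacts"] + 1):
        if contact % 10 == 0:
            print(".", end="")
    print(" Done")

    # dict.keys() returns a view, so random.choice() needs an explicit list.
    key = random.choice(list(counts.keys()))

    # Exception handling uses "as" instead of a comma.
    try:
        pickle.dumps(counts)
    except Exception as e:
        print("pickling failed:", e)
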
@@ -24,7 +24,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
import csv
import string
import cPickle as pickle
import pickle as pickle
import random
import os
@@ -37,7 +37,7 @@ simple_files_to_process = ['street-names.txt', 'street-types.txt', 'latin-words.
def load_files():
# Process Zip Codes
all_zips = {}
reader = csv.reader(open(os.path.join(data_dir,"zip-codes.txt"), "rb"))
reader = csv.reader(open(os.path.join(data_dir,"zip-codes.txt")))
for row in reader:
data = [string.capwords(row[3]), row[4]]
all_zips[row[0]] = data
@@ -45,7 +45,7 @@ def load_files():
pickle.dump(all_zips, output)
#Process area codes
area_code_file = open(os.path.join(data_dir,"area-codes.txt"), "rb")
area_code_file = open(os.path.join(data_dir,"area-codes.txt"))
state_area_codes = {}
for line in area_code_file:
clean_line = line.replace(' ','').rstrip('\n')
@@ -55,7 +55,7 @@ def load_files():
#Process Last Names
last_names = []
last_name_file = open(os.path.join(data_dir,"last-name.txt"),"rb")
last_name_file = open(os.path.join(data_dir,"last-name.txt"))
for line in last_name_file:
clean_line = line.rstrip('\n')
last_names.append(string.capwords(clean_line.split(' ')[0]))
@@ -64,7 +64,7 @@ def load_files():
#Process Male First Names
male_first_names = []
male_first_name_file = open(os.path.join(data_dir,"male-first-name.txt"),"rb")
male_first_name_file = open(os.path.join(data_dir,"male-first-name.txt"))
for line in male_first_name_file:
clean_line = line.rstrip('\n')
male_first_names.append(string.capwords(clean_line.split(' ')[0]))
@@ -73,7 +73,7 @@ def load_files():
#Process Female First Names
female_first_names = []
female_first_name_file = open(os.path.join(data_dir,"female-first-name.txt"),"rb")
female_first_name_file = open(os.path.join(data_dir,"female-first-name.txt"))
for line in female_first_name_file:
clean_line = line.rstrip('\n')
female_first_names.append(string.capwords(clean_line.split(' ')[0]))
@@ -83,7 +83,7 @@ def load_files():
#Process the simple files
for f in simple_files_to_process:
temp = []
sample_file = open(os.path.join(data_dir, f), "rb")
sample_file = open(os.path.join(data_dir, f))
for line in sample_file:
clean_line = line.rstrip('\n')
temp.append(clean_line)
@@ -93,7 +93,7 @@ def load_files():
output.close()
if __name__ == "__main__":
response = string.lower(raw_input("Type 'yes' to reload the data from source files and create a new source file: "))
response = string.lower(input("Type 'yes' to reload the data from source files and create a new source file: "))
if response == 'yes':
load_files()
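
The "rb" flags dropped above matter because csv.reader and plain line iteration expect text-mode files in Python 3; binary mode would yield bytes. A minimal sketch of the zip-code loading under that convention (data_dir is a placeholder for wherever the sample data lives):

    import csv
    import os
    import string

    data_dir = "data"   # placeholder; the real script defines this elsewhere

    # csv.reader wants a text-mode file; newline="" lets the csv module
    # handle line endings itself.
    with open(os.path.join(data_dir, "zip-codes.txt"), newline="") as f:
        reader = csv.reader(f)
        all_zips = {row[0]: [string.capwords(row[3]), row[4]] for row in reader}
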
@@ -17,7 +17,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import cPickle as pickle
import pickle as pickle
import random
import string
import calendar
@@ -75,7 +75,7 @@ def create_job_title():
def create_phone(zip_code=None):
if not zip_code:
zip_code = random.choice(all_zips.keys())
zip_code = random.choice(list(all_zips.keys()))
area_code = random.choice(state_area_codes[all_zips[zip_code][1]])
output = "(%s)%s-%s" % (area_code, random.randint(111,999), random.randint(1111,9999))
return(output)
@@ -89,27 +89,27 @@ def create_street():
def create_city_state_zip(zip_code=None):
if not zip_code:
zip_code = random.choice(all_zips.keys())
zip_code = random.choice(list(all_zips.keys()))
return(zip_code, all_zips[zip_code][0], all_zips[zip_code][1])
def create_sentence(min=4, max=15):
sentence = []
sentence.append(string.capitalize(random.choice(latin_words)))
for word in xrange(1, random.randint(min, max-1)):
sentence.append(random.choice(latin_words).capitalize())
for word in range(1, random.randint(min, max-1)):
sentence.append(random.choice(latin_words))
return ' '.join(sentence) + '.'
def create_paragraphs(num=1, min_sentences=4, max_sentences=7):
paragraphs = []
for para in xrange(0, num):
for sentence in xrange(1, random.randint(min_sentences, max_sentences)):
for para in range(0, num):
for sentence in range(1, random.randint(min_sentences, max_sentences)):
paragraphs.append(create_sentence()+" ")
paragraphs.append("\n\n")
return "".join(paragraphs)
def create_text(min_sentences=4, max_sentences=10):
text = []
for sentence in xrange(1, random.randint(min_sentences, max_sentences)):
for sentence in range(1, random.randint(min_sentences, max_sentences)):
text.append(create_sentence())
return ' '.join(text)
@@ -139,7 +139,7 @@ def create_birthday(age=random.randint (16, 80)):
def create_email(tld=None, name=create_name()):
if not tld:
tld = random.choice(email_domains)
user = random.choice(usernames) % tuple(map(lambda n: n.lower(), name))
user = random.choice(usernames) % tuple([n.lower() for n in name])
domain = random.choice(latin_words) + random.choice(latin_words)
return ("%s@%s.%s" % (user, domain, tld))
@@ -152,7 +152,7 @@ def create_company_name(biz_type=None):
random.choice(last_names))
name.append('LLP')
else:
for i in xrange(1,random.randint(2,4)):
for i in range(1,random.randint(2,4)):
rand_name = random.choice(company_names)
if rand_name not in name:
name.append(rand_name)
@@ -173,17 +173,17 @@ if __name__ == "__main__":
add = create_street()
zip, city, state = create_city_state_zip()
phone = create_phone(zip)
print first, last
print add
print "%s %s, %s" % (city, state,zip)
print phone
print create_sentence(), "\n"
print create_paragraphs(num=3)
print(first, last)
print(add)
print("%s %s, %s" % (city, state,zip))
print(phone)
print(create_sentence(), "\n")
print(create_paragraphs(num=3))
expiry = create_date(max_years_future=3)
print expiry.strftime("%m/%y")
print create_email()
print create_company_name()
print create_job_title()
print(expiry.strftime("%m/%y"))
print(create_email())
print(create_company_name())
print(create_job_title())
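
Both create_phone() and create_city_state_zip() pick a random key from all_zips. In Python 3 dict.keys() is a view rather than a list, and random.choice() needs an indexable sequence, hence the list() wrappers in the hunks above. A standalone illustration with made-up data:

    import random

    all_zips = {"90210": ["Beverly Hills", "CA"], "10001": ["New York", "NY"]}  # invented

    # keys() is a non-indexable view in Python 3, so materialise it first.
    zip_code = random.choice(list(all_zips.keys()))
    city, state = all_zips[zip_code]
    print(zip_code, city, state)
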
#! /usr/bin/env python2
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
@@ -6,7 +6,7 @@ import os
import string
import time
import sys
import ConfigParser
import configparser
import platform
import ontology_prefixes
@@ -28,7 +28,7 @@ def recent_enough_python ():
"""
True if it is 2.6 or more recent
"""
print "Running generate with python", platform.python_version ()
print("Running generate with python", platform.python_version ())
version = platform.python_version_tuple ()
return (int(version[0]) >= 2 and int(version[1]) >= 6)
@@ -43,7 +43,7 @@ def argument_parser():
args = argument_parser().parse_args()
config = ConfigParser.RawConfigParser()
config = configparser.RawConfigParser()
try:
loaded_files = config.read(args.config_file)
# config.read
@@ -51,10 +51,10 @@ try:
# in 2.6+ returns a list of loaded files
if recent_enough_python ():
if (len (loaded_files) != 1):
print "Cannot open %s" % (args.config_file)
print("Cannot open %s" % (args.config_file))
sys.exit (-1)
except Exception, e:
print "Failed to read configuration file %s (%s)" % (args.config_file, e)
except Exception as e:
print("Failed to read configuration file %s (%s)" % (args.config_file, e))
sys.exit (-1)
if args.output_dir:
@@ -111,9 +111,9 @@ tools.addType( 'mfo#FeedMessage', 81 )
tools.addType( 'mto#TransferElement', 90 )
tools.addType( 'mto#UploadTransfer', 91 )
print "Generating Contacts",
print("Generating Contacts")
count_contacts = get_counter('counts','contacts')
for contact in xrange(1, count_contacts+1):
for contact in range(1, count_contacts+1):
if (contact % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
@@ -125,18 +125,18 @@ for contact in xrange(1, count_contacts+1):
nco.generateIMAddress( contact )
nco.generateContactIM( contact )
nco.generatePersonContact( contact )
print "Done"
print("Done")
print "Generating Locations and landmarks",
print("Generating Locations and landmarks")
count_locations = get_counter('counts','locations')
for location in xrange(1, count_locations+1):
for location in range(1, count_locations+1):
if (location % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
slo.generateGeoLocation( location )
slo.generateLandmark( location )
count_locations = get_counter('counts','locations')
for location in xrange(1, count_locations+1):
for location in range(1, count_locations+1):
if (location % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
@@ -144,142 +144,142 @@ for location in xrange(1, count_locations+1):
mlo.generateLocationBoundingBox( location )
mlo.generateGeoLocation( location )
mlo.generateLandmark( location )
print "Done"
print("Done")
print "Generate Emails",
print("Generate Emails")
count_accounts = get_counter('counts','accounts')
count_folders = get_counter('counts','folders')
count_emails = get_counter('counts','emails')
for account in xrange(1, count_accounts+1):
for account in range(1, count_accounts+1):
sys.stdout.write('.')
sys.stdout.flush()
nmo.generateMailAccount( account*count_folders*count_emails )
for folder in xrange(1, count_folders+1):
for folder in range(1, count_folders+1):
nmo.generateMailFolder( account*count_folders*count_emails+folder*count_emails )
for email in xrange(1, count_emails+1):
for email in range(1, count_emails+1):
nmo.generateEmail( account*count_folders*count_emails+folder*count_emails+email )
print "Done"
print("Done")
print "Generate IM messages",
print("Generate IM messages")
count_comchans = get_counter('counts','comchans')
count_ims = get_counter('counts','ims')
for comchannel in xrange(1, count_comchans+1):
for comchannel in range(1, count_comchans+1):
sys.stdout.write('.')
sys.stdout.flush()
nmo.generateCommunicationChannel( comchannel )
for im in xrange(1, count_ims+1):
for im in range(1, count_ims+1):
nmo.generateIMMessage( comchannel*count_ims+im )
print "Done"
print("Done")
print "Generate SMS messages",
print("Generate SMS messages")
count_sms = get_counter('counts','sms')
for sms in xrange(1, count_sms+1):
for sms in range(1, count_sms+1):
if (sms % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nmo.generateSMSMessage( sms )
print "Done"
print("Done")
print "Generate calls",
print("Generate calls")
count_calls = get_counter('counts','calls')
for call in xrange(1, count_calls+1):
for call in range(1, count_calls+1):
if (call % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nmo.generateCall( call )
print "Done"
print("Done")
print "* Starting with file system based content"
print("* Starting with file system based content")
print "Generate volumes",
print("Generate volumes")
count_volumes = get_counter('counts','volumes')
for volume in xrange(1, count_volumes+1):
for volume in range(1, count_volumes+1):
sys.stdout.write('.')
sys.stdout.flush()
tracker.generateVolume( volume )
print "Done"
print("Done")
print "Generating Music",
print("Generating Music")
count_artists = get_counter('counts','artists')
count_albums = get_counter('counts','albums')
count_songs = get_counter('counts','songs')
count_discs = get_counter('counts','discs')
song_index = 0
for artist in xrange(1, count_artists+1):
for artist in range(1, count_artists+1):
sys.stdout.write('.')
sys.stdout.flush()
nmm.generateArtist( artist*count_albums*count_songs )
for album in xrange(1, count_albums+1):
for album in range(1, count_albums+1):
nmm.generateAlbum( artist*count_albums*count_songs+album )
for disc in xrange(1, count_discs+1):
for disc in range(1, count_discs+1):
discUri = nmm.generateMusicAlbumDisc( artist*count_albums*count_songs+album+disc )
for song in xrange(1, count_songs+1):
for song in range(1, count_songs+1):
nmm.generateMusicPiece( song_index, discUri )
song_index = song_index + 1
print "Done"
print("Done")
print "Generate Equipment",
print("Generate Equipment")
count_equipment = get_counter('counts','equipment')
nmm.generateOwnEquipment ()
for equipment in xrange(1, count_equipment):
for equipment in range(1, count_equipment):
if (equipment % 2 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nmm.generateEquipment( equipment )
print "Done"
print("Done")
print "Generate Photos",
print("Generate Photos")
count_images = get_counter('counts','images')
for photo in xrange(1, count_images+1):
for photo in range(1, count_images+1):
if (photo % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nmm.generatePhoto( photo )
print "Done"
print("Done")
print "Generate Videos",
print("Generate Videos")
count_videos = get_counter('counts','videos')
for video in xrange(1, count_videos+1):
for video in range(1, count_videos+1):
if (video % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nmm.generateVideo( video )
print "Done"
print("Done")
print "Generate plain text documents",
print("Generate plain text documents")
count_docs = get_counter('counts','docs')
for doc in xrange(1, count_docs+1):
for doc in range(1, count_docs+1):
if (doc % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
nfo.generatePlainTextDocument( doc )
print "Done"
print("Done")
print "Generate feeds",
print("Generate feeds")
count_fchans = get_counter('counts','fchans')
count_fms = get_counter('counts','fms')
for fchan in xrange(1, count_fchans+1):
for fchan in range(1, count_fchans+1):
sys.stdout.write('.')
sys.stdout.flush()
mfo.generateFeedChannel( fchan*count_fms )
for fm in xrange(1, count_fms+1):
for fm in range(1, count_fms+1):
mfo.generateFeedMessage( fchan*count_fms+fm )
print "Done"
print("Done")
print "Generate software",
print("Generate software")
count_softcats = get_counter('counts','softcats')
count_softapps = get_counter('counts','softapps')
for softcat in xrange(1, count_softcats+1):
for softcat in range(1, count_softcats+1):
sys.stdout.write('.')
sys.stdout.flush()
nfo.generateSoftwareCategory( softcat*count_softapps )
for softapp in xrange(1, count_softapps+1):
for softapp in range(1, count_softapps+1):
nfo.generateSoftwareApplication( softcat*count_softapps+softapp )
print "Done"
print("Done")
print "Generate something for the rest",
print("Generate something for the rest")
count_others = get_counter('counts','others')
for index in xrange(1,count_others+1):
for index in range(1,count_others+1):
if (index % 10 == 0):
sys.stdout.write('.')
sys.stdout.flush()
@@ -290,7 +290,7 @@ for index in xrange(1,count_others+1):
ncal.generateTodo( index )
mto.generateTransferElement( index )
mto.generateUploadTransfer( index )
print "Done"
print("Done")
# dump all files
tools.saveResult(output_dir=args.output_dir)
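
In the Python 2 version, the trailing comma in lines like print "Generating Contacts", suppressed the newline so the progress dots written with sys.stdout.write() stayed on the same line; the ported print("Generating Contacts") adds a newline instead. For reference, the Python 3 spelling of the old behaviour is the end keyword:

    import sys

    print("Generating Contacts", end="")   # no newline, like Python 2's trailing comma
    for contact in range(1, 101):           # 100 items, purely illustrative
        if contact % 10 == 0:
            sys.stdout.write(".")
            sys.stdout.flush()
    print(" Done")
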
@@ -41,7 +41,7 @@ def generateUploadTransfer(index):
# add some random transfers
upload_transfers = ''
for index in xrange (1, 2 + (index % 10)):
for index in range (1, 2 + (index % 10)):
upload_transfers += 'mto:transferList <%s> ;\n' % tools.getRandomUri( 'mto#TransferElement' )
tools.addItem( me, upload_uri, mto_UploadTransfer % locals() )
@@ -32,8 +32,8 @@ def getRandomUri(type):
def saveResult (output_dir=None):
output_dir = output_dir or 'ttl'
for ontology, content in result.items():
print 'Saving', output_filenames[ontology], '...'
for ontology, content in list(result.items()):
print('Saving', output_filenames[ontology], '...')
path = os.path.join(output_dir, output_filenames[ontology])
output = open(path, 'w')
output.write( ontology_prefixes.ontology_prefixes )
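
In saveResult() the list(result.items()) call makes a defensive copy; a Python 3 items() view can be iterated directly as long as the dictionary is not modified inside the loop. A small self-contained sketch of that save loop (the result and output_filenames dictionaries here are placeholders):

    import os

    result = {"nco": "# ttl content ...", "nmo": "# ttl content ..."}   # placeholder data
    output_filenames = {"nco": "nco.ttl", "nmo": "nmo.ttl"}             # placeholder names

    def save_result(output_dir="ttl"):
        os.makedirs(output_dir, exist_ok=True)
        for ontology, content in result.items():   # a view iterates fine in Python 3
            path = os.path.join(output_dir, output_filenames[ontology])
            print("Saving", output_filenames[ontology], "...")
            with open(path, "w") as output:
                output.write(content)
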
#!/usr/bin/env python
#!/usr/bin/env python3
'''Create tree from real data!
@@ -28,14 +28,9 @@ import argparse
import os
import sys
if sys.version_info[0] >= 3:
import configparser
import urllib.parse as urlparse
from urllib.parse import unquote as url_unquote
else:
import ConfigParser as configparser
import urlparse
from urllib import unquote as url_unquote
import configparser
import urllib.parse as urlparse
from urllib.parse import unquote as url_unquote
def argument_parser():
@@ -105,13 +100,13 @@ def resources_with_mime_types(db, mime_types, limit=10):
''' % (make_sparql_list(mime_types), limit)
result = db.query(query)
while result.next():
while next(result):
yield result.get_string(0)[0]
def file_url_to_path(url):
'''Convert file:// URL to a pathname.'''
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(url)
if scheme != 'file':
raise RuntimeError("Only file:// URLs are supported.")
if any([netloc, params, query, fragment]):
@@ -142,7 +137,7 @@ def main():
extractors_with_no_files = []
for extractor, mime_types in rule_map.items():
for extractor, mime_types in list(rule_map.items()):
resources = list(resources_with_mime_types(db, mime_types, limit=2))
if (len(resources) == 0):
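
With the Python 2 fallback imports removed, URL handling comes entirely from urllib.parse. A simplified, self-contained sketch of what file_url_to_path() does with it:

    from urllib.parse import urlparse, unquote

    def file_url_to_path(url):
        """Convert a file:// URL to a local pathname (simplified sketch)."""
        parts = urlparse(url)
        if parts.scheme != "file":
            raise RuntimeError("Only file:// URLs are supported.")
        return unquote(parts.path)

    print(file_url_to_path("file:///home/user/My%20Music/song.mp3"))
    # -> /home/user/My Music/song.mp3
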
@@ -96,7 +96,7 @@ def merge_includes(all_includes):
version = element.get('version')
if name not in merged:
merged[name] = element
return merged.values()
return list(merged.values())
def merge_namespaces(all_namespaces):
@@ -17,12 +17,12 @@
# 02110-1301, USA.
#
import ConfigParser, os
import configparser, os
import sys
import getopt
def usage():
print "Usage: python service2rdf-xml.py --metadata=ONTOLOGY.metadata --service=ONTOLOGY.service [--uri=URI]"
print("Usage: python service2rdf-xml.py --metadata=ONTOLOGY.metadata --service=ONTOLOGY.service [--uri=URI]")
def main():
try:
@@ -50,74 +50,74 @@ def main():
usage ()
sys.exit ()
service = ConfigParser.ConfigParser()
service = configparser.ConfigParser()
service.readfp(open(servicef))
metadata = ConfigParser.ConfigParser()
metadata = configparser.ConfigParser()
metadata.readfp(open(metadataf))
print "<rdf:RDF"
print " xmlns:nid3=\"http://www.semanticdesktop.org/ontologies/2007/05/10/nid3#\""
print " xmlns:nfo=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#\""
print " xmlns:nmo=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nmo#\""
print " xmlns:nie=\"http://www.semanticdesktop.org/ontologies/2007/01/19/nie#\""
print " xmlns:exif=\"http://www.kanzaki.com/ns/exif#\""
print " xmlns:nao=\"http://www.semanticdesktop.org/ontologies/2007/08/15/nao#\""
print " xmlns:rdfs=\"http://www.w3.org/2000/01/rdf-schema#\""
print " xmlns:protege=\"http://protege.stanford.edu/system#\""
print " xmlns:dcterms=\"http://purl.org/dc/terms/\""
print " xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\""
print " xmlns:ncal=\"http://www.semanticdesktop.org/ontologies/2007/04/02/ncal#\""
print " xmlns:xsd=\"http://www.w3.org/2001/XMLSchema#\""
print " xmlns:nrl=\"http://www.semanticdesktop.org/ontologies/2007/08/15/nrl#\""
print " xmlns:pimo=\"http://www.semanticdesktop.org/ontologies/2007/11/01/pimo#\""
print " xmlns:geo=\"http://www.w3.org/2003/01/geo/wgs84_pos#\""
print " xmlns:tmo=\"http://www.semanticdesktop.org/ontologies/2008/05/20/tmo#\""
print " xmlns:dc=\"http://purl.org/dc/elements/1.1/\""
print " xmlns:nco=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nco#\""
print " xmlns:nexif=\"http://www.semanticdesktop.org/ontologies/2007/05/10/nexif#\">"
print ""
print("<rdf:RDF")
print(" xmlns:nid3=\"http://www.semanticdesktop.org/ontologies/2007/05/10/nid3#\"")
print(" xmlns:nfo=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#\"")
print(" xmlns:nmo=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nmo#\"")
print(" xmlns:nie=\"http://www.semanticdesktop.org/ontologies/2007/01/19/nie#\"")
print(" xmlns:exif=\"http://www.kanzaki.com/ns/exif#\"")
print(" xmlns:nao=\"http://www.semanticdesktop.org/ontologies/2007/08/15/nao#\"")
print(" xmlns:rdfs=\"http://www.w3.org/2000/01/rdf-schema#\"")
print(" xmlns:protege=\"http://protege.stanford.edu/system#\"")
print(" xmlns:dcterms=\"http://purl.org/dc/terms/\"")
print(" xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"")
print(" xmlns:ncal=\"http://www.semanticdesktop.org/ontologies/2007/04/02/ncal#\"")
print(" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema#\"")
print(" xmlns:nrl=\"http://www.semanticdesktop.org/ontologies/2007/08/15/nrl#\"")
print(" xmlns:pimo=\"http://www.semanticdesktop.org/ontologies/2007/11/01/pimo#\"")
print(" xmlns:geo=\"http://www.w3.org/2003/01/geo/wgs84_pos#\"")
print(" xmlns:tmo=\"http://www.semanticdesktop.org/ontologies/2008/05/20/tmo#\"")
print(" xmlns:dc=\"http://purl.org/dc/elements/1.1/\"")
print(" xmlns:nco=\"http://www.semanticdesktop.org/ontologies/2007/03/22/nco#\"")
print(" xmlns:nexif=\"http://www.semanticdesktop.org/ontologies/2007/05/10/nexif#\">")
print("")
for klass in service.sections():
splitted = klass.split (":")
print "\t<rdfs:Class rdf:about=\"" + uri + "/" + splitted[0] + "#" + splitted[1] + "\">"
print "\t\t<rdfs:label>" + splitted[1] + "</rdfs:label>"
print("\t<rdfs:Class rdf:about=\"" + uri + "/" + splitted[0] + "#" + splitted[1] + "\">")
print("\t\t<rdfs:label>" + splitted[1] + "</rdfs:label>")
for name, value in service.items (klass):
if name == "SuperClasses":
vsplit = value.split (";")
for val in vsplit:
vvsplit = val.split (":");
print "\t\t<rdfs:subClassOf>"
print "\t\t\t<rdfs:Class rdf:about=\"" + uri + "/" +vvsplit[0] + "#" + vvsplit[1] + "\"/>"
print "\t\t</rdfs:subClassOf>"
print "\t</rdfs:Class>"
print("\t\t<rdfs:subClassOf>")
print("\t\t\t<rdfs:Class rdf:about=\"" + uri + "/" +vvsplit[0] + "#" + vvsplit[1] + "\"/>")
print("\t\t</rdfs:subClassOf>")
print("\t</rdfs:Class>")
for mdata in metadata.sections():
splitted = mdata.split (":")
print "\t<rdf:Property rdf:about=\"" + uri + "#" + splitted[1] + "\">"
print "\t\t<rdfs:label>" + splitted[1] + "</rdfs:label>"
print("\t<rdf:Property rdf:about=\"" + uri + "#" + splitted[1] + "\">")
print("\t\t<rdfs:label>" + splitted[1] + "</rdfs:label>")
for name, value in metadata.items (mdata):
if name == "datatype":
print "\t\t<rdfs:range rdf:resource=\"" + uri + "#" + value + "\"/>"
print("\t\t<rdfs:range rdf:resource=\"" + uri + "#" + value + "\"/>")
if name == "domain":
vvsplit = value.split (":")
print "\t\t<rdfs:domain rdf:resource=\"" + uri + "/" +vvsplit[0] + "#" + vvsplit[1] + "\"/>"
print("\t\t<rdfs:domain rdf:resource=\"" + uri + "/" +vvsplit[0] + "#" + vvsplit[1] + "\"/>")
if name == "parent":
print "\t\t<rdfs:subPropertyOf rdf:resource=\"" + uri + "#" + value.split (":")[1] + "\"/>"
print("\t\t<rdfs:subPropertyOf rdf:resource=\"" + uri + "#" + value.split (":")[1] + "\"/>")
if name == "weight":
print "\t\t<rdfs:comment>Weight is " + value + "</rdfs:comment>"
print("\t\t<rdfs:comment>Weight is " + value + "</rdfs:comment>")
print "\t</rdf:Property>"
print("\t</rdf:Property>")
print "</rdf:RDF>"
except getopt.GetoptError, err:
print str(err)
print("</rdf:RDF>")
except getopt.GetoptError as err:
print(str(err))
usage ()
sys.exit(2)
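
Both of the ontology conversion scripts switch from ConfigParser to configparser. They keep readfp(), which still works here but has been deprecated in favour of read_file() since Python 3.2; a minimal sketch of reading a .service-style section the newer way (the section content is invented):

    import configparser

    sample = "[nmo:Message]\nSuperClasses=nie:InformationElement\n"   # invented example

    service = configparser.ConfigParser()
    service.read_string(sample)   # read_file()/read_string() replace the deprecated readfp()

    for section in service.sections():
        prefix, name = section.split(":")
        print(" <section prefix=\"%s\" name=\"%s\" fullname=\"%s\">" % (prefix, name, section))
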
@@ -17,25 +17,25 @@
# 02110-1301, USA.
#
import ConfigParser, os
import configparser, os
import sys
config = ConfigParser.ConfigParser()
config = configparser.ConfigParser()
config.readfp(sys.stdin)
print "<service>"
print("<service>")
for section in config.sections():
splitted = section.split (":")
print " <section prefix=\"" + splitted[0] + "\" name=\"" + splitted[1] + "\" fullname=\"" + section + "\">"
print(" <section prefix=\"" + splitted[0] + "\" name=\"" + splitted[1] + "\" fullname=\"" + section + "\">")
for name, value in config.items (section):
vsplit = value.split (":")
print "\t<item name=\""+ name + "\" fullvalue=\"" + value+ "\">"
print("\t<item name=\""+ name + "\" fullvalue=\"" + value+ "\">")