#!/usr/bin/python
#
ver=0.031
# This version is not ready for prime time.
# Scrapes Craigslist job listings for telecommuting work.
# pflint Sat 19 Apr 2014 09:42:23 AM EDT
#
# Run with PYTHONIOENCODING=utf-8 so printing non-ASCII listings does not fail.
#
import urllib2
from BeautifulSoup import BeautifulSoup
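# Note: this script targets Python 2 (print statement, urllib2) and the
# BeautifulSoup 3 package, which provides the "BeautifulSoup" module imported above.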
# base="http://montreal.fr.craigslist.ca/search/"
cityw = {
	'Burlington VT':'http://burlington.craigslist.org/search/',
	'Washington DC':'http://washingtondc.craigslist.org/search/',
	'Montreal CA':'http://montreal.fr.craigslist.ca/search/',
	'Albany NY':'http://albany.craigslist.org/search/',
	'Eastern CT':'http://newlondon.craigslist.org/search/',
	'Hartford CT':'http://hartford.craigslist.org/search/',
	'New Haven CT':'http://newhaven.craigslist.org/search/',
	'Northwest CT':'http://nwct.craigslist.org/search/',
	}
# Craigslist job-category URL codes to search.
specs = ['eng', 'sad', 'sof', 'tsh']
for city, base in cityw.items():
	for spec in specs:
		# Fetch the search results for this city/category, filtered to
		# telecommuting positions, and parse them.
		url = base + spec + "?addOne=telecommuting"
		page = urllib2.urlopen(url)
		soup = BeautifulSoup(page.read())
		# Each listing is a <p class="row"> element.
		sline = soup.findAll('p', {'class': 'row'})
		# Absolute-link prefix: the base URL minus its trailing "search/".
		burl = 'href="' + base[:-7]
		for dinstance in sline:
			# Render the listing as UTF-8 HTML and rewrite its relative
			# links ('href="/...') into absolute links for this site.
			dline = dinstance.encode("utf-8")
			print dline.replace('href="/', burl)
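# Example usage (the name "clscrape.py" is just a placeholder for whatever this
# file is saved as); the printed <p class="row"> fragments can be captured as HTML:
#   PYTHONIOENCODING=utf-8 ./clscrape.py > telecommute_jobs.html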


