diff --git a/model-files/RunModel.bat b/model-files/RunModel.bat index d2b640d1..2e95e847 100644 --- a/model-files/RunModel.bat +++ b/model-files/RunModel.bat @@ -1,9 +1,5 @@ @echo on -:: set RUNTYPE=LOCAL to run everything on this machine -:: set RUNTYPE=DISTRIBUTED to farm out work to other nodes -set RUNTYPE=LOCAL - ::~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: RunModel.bat :: @@ -63,10 +59,9 @@ SET AV_SCENARIO=0 :: Scripts base directory SET BASE_SCRIPTS=CTRAMP\scripts -:: Add these variables to the PATH environment variable, moving the current path to the back of the list -set PATH=%CD%\CTRAMP\runtime;C:\Windows\System32;%JAVA_PATH%\bin;%TPP_PATH%;%CUBE_PATH%;%CUBE_DLL_PATH%;%PYTHON_PATH%;%PYTHON_PATH%\condabin;%PYTHON_PATH%\envs - -CALL conda activate mtc_py2 +:: expect conda to be in PATH +CALL conda activate %TM2_PYTHON_CONDA_ENV% +IF ERRORLEVEL 1 goto done :: --------- restart block ------------------------------------------------------------------------------ :: Use these only if restarting @@ -143,18 +138,18 @@ if NOT %MATRIX_SERVER%==localhost ( :: preprocess input network to :: 1 - fix space issue in CNTYPE :: 2 - add a FEET field based on DISTANCE -runtpp %BASE_SCRIPTS%\preprocess\preprocess_input_net.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\preprocess_input_net.job IF ERRORLEVEL 2 goto done :: Write a batch file with number of zones, taps, mazs -runtpp %BASE_SCRIPTS%\preprocess\writeZoneSystems.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\writeZoneSystems.job if ERRORLEVEL 2 goto done ::Run the batch file call zoneSystem.bat :: Build sequential numberings -runtpp %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job :: Create all necessary input files based on updated sequential zone numbering :zones @@ -172,11 +167,11 @@ move popsyn\households_renum.csv popsyn\households.csv IF %SELECT_COUNTY% GTR 0 ( :: Collapse the mazs outside select county - runtpp %BASE_SCRIPTS%\preprocess\CreateCollapsedNetwork.job + "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\CreateCollapsedNetwork.job if ERRORLEVEL 2 goto done :: RERUN: Write a batch file with number of zones, taps, mazs - runtpp %BASE_SCRIPTS%\preprocess\writeZoneSystems.job + "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\writeZoneSystems.job if ERRORLEVEL 2 goto done ::RERUN: Run the batch file @@ -193,7 +188,7 @@ IF %SELECT_COUNTY% GTR 0 ( python %BASE_SCRIPTS%\preprocess\popsampler.PY landuse\sampleRateByTAZ.csv popsyn\households.csv popsyn\persons.csv :: RERUN: Build sequential numberings - runtpp %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job + "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job if ERRORLEVEL 2 goto done ::RERUN: Create all necessary input files based on updated sequential zone numbering @@ -209,7 +204,7 @@ IF %SELECT_COUNTY% GTR 0 ( :: move popsyn\households_renum.csv popsyn\households.csv :: Write out the intersection and maz XYs -runtpp %BASE_SCRIPTS%\preprocess\maz_densities.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\maz_densities.job if ERRORLEVEL 2 goto done :: Calculate density fields and append to MAZ file @@ -217,15 +212,15 @@ python %BASE_SCRIPTS%\preprocess\createMazDensityFile.py IF ERRORLEVEL 1 goto done :: Build sequential numberings -runtpp %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\zone_seq_net_builder.job if ERRORLEVEL 2 goto done :: Translate the roadway
network into a non-motorized network -runtpp %BASE_SCRIPTS%\preprocess\CreateNonMotorizedNetwork.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\CreateNonMotorizedNetwork.job if ERRORLEVEL 2 goto done :: Create the tap data -runtpp %BASE_SCRIPTS%\preprocess\tap_to_taz_for_parking.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\tap_to_taz_for_parking.job if ERRORLEVEL 2 goto done python %BASE_SCRIPTS%\preprocess\tap_data_builder.py . @@ -235,28 +230,28 @@ IF ERRORLEVEL 1 goto done python %BASE_SCRIPTS%\preprocess\csvToDbf.py hwy\tolls.csv hwy\tolls.dbf IF ERRORLEVEL 1 goto done -runtpp %BASE_SCRIPTS%\preprocess\SetTolls.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\SetTolls.job if ERRORLEVEL 2 goto done :: Set a penalty to dummy links connecting HOV/HOT lanes and general purpose lanes -runtpp %BASE_SCRIPTS%\preprocess\SetHovXferPenalties.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\SetHovXferPenalties.job if ERRORLEVEL 2 goto done :capclass :: Create areatype and capclass fields in network -runtpp %BASE_SCRIPTS%\preprocess\SetCapClass.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\SetCapClass.job if ERRORLEVEL 2 goto done -runtpp %BASE_SCRIPTS%\preprocess\setInterchangeDistance.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\setInterchangeDistance.job if ERRORLEVEL 2 goto done :createfivehwynets :: Create time-of-day-specific -runtpp %BASE_SCRIPTS%\preprocess\CreateFiveHighwayNetworks.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\CreateFiveHighwayNetworks.job if ERRORLEVEL 2 goto done :: Create taz networks -runtpp %BASE_SCRIPTS%\preprocess\BuildTazNetworks.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\preprocess\BuildTazNetworks.job if ERRORLEVEL 2 goto done echo COMPLETED PREPROCESS %DATE% %TIME% >> logs\feedback.rpt @@ -272,11 +267,11 @@ echo COMPLETED PREPROCESS %DATE% %TIME% >> logs\feedback.rpt :nonmot :: Build the skim tables -runtpp %BASE_SCRIPTS%\skims\NonMotorizedSkims.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\NonMotorizedSkims.job if ERRORLEVEL 2 goto done :::: Build the maz-maz skims -runtpp %BASE_SCRIPTS%\skims\MazMazSkims.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\MazMazSkims.job if ERRORLEVEL 2 goto done echo COMPLETED NON-MOTORIZED-SKIMS %DATE% %TIME% >> logs\feedback.rpt @@ -287,7 +282,7 @@ echo COMPLETED NON-MOTORIZED-SKIMS %DATE% %TIME% >> logs\feedback.rpt :: :: ------------------------------------------------------------------------------------------------------ :: Run the airport model -runtpp %BASE_SCRIPTS%\nonres\BuildAirPax.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\nonres\BuildAirPax.job if ERRORLEVEL 2 goto done :itercnt @@ -300,31 +295,30 @@ if ERRORLEVEL 2 goto done :: Build the initial highway and transit skims :hwyskims -runtpp %BASE_SCRIPTS%\skims\HwySkims.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\HwySkims.job if ERRORLEVEL 2 goto done :transitnet -runtpp %BASE_SCRIPTS%\skims\BuildTransitNetworks.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\BuildTransitNetworks.job if ERRORLEVEL 2 goto done :transitskimsprep -runtpp %BASE_SCRIPTS%\skims\TransitSkimsPrep.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\TransitSkimsPrep.job if ERRORLEVEL 2 goto done :createemmenetwork -:: changing to python 3 environment for emme -CALL conda deactivate -CALL conda activate mtc :: Create emme project from scratch since it's the first iteration -python %BASE_SCRIPTS%\skims\cube_to_emme_network_conversion.py -p "trn" --first_iteration "yes" +"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\cube_to_emme_network_conversion.py -p "trn" --first_iteration "yes" 
IF ERRORLEVEL 1 goto done -%EMME_PYTHON_PATH%\python %BASE_SCRIPTS%\skims\create_emme_network.py -p "trn" --name "mtc_emme" --first_iteration "yes" +"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\create_emme_network.py -p "trn" --name "mtc_emme" --first_iteration "yes" IF ERRORLEVEL 1 goto done -REM %EMME_PYTHON_PATH%\python %BASE_SCRIPTS%\skims\skim_transit_network.py -p "trn" -s "skims" --first_iteration "yes" -%EMME_PYTHON_PATH%\python %BASE_SCRIPTS%\skims\skim_transit_network.py -p "trn" -s "skims" --iteration 1 --skip_import_demand +:: Passing the port specified in the Emme Desktop GUI +:: see Tools > Application Options > Advanced. +:: At the bottom of the pane, there is text: "Desktop API is listening on port 4242." +"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\skim_transit_network.py -p "trn" -s "skims" --iteration 1 --skip_import_demand --port 4242 IF ERRORLEVEL 1 goto done CALL conda deactivate @@ -332,10 +326,10 @@ CALL conda activate mtc_py2 :afteremmeskims -REM runtpp %BASE_SCRIPTS%\skims\TransitSkims.job +REM "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\TransitSkims.job REM if ERRORLEVEL 2 goto done -REM runtpp %BASE_SCRIPTS%\skims\SkimSetsAdjustment.job +REM "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\SkimSetsAdjustment.job REM if ERRORLEVEL 2 goto done ::Step X: Main model iteration setup @@ -431,8 +425,8 @@ ROBOCOPY "%MATRIX_SERVER_BASE_DIR%\ctramp_output" ctramp_output *.mat /NDL /NFL ROBOCOPY "%MATRIX_SERVER_BASE_DIR%\ctramp_output" ctramp_output *.omx /NDL /NFL :afterrobocopy -runtpp CTRAMP\scripts\assign\merge_auto_matrices.s -REM runtpp CTRAMP\scripts\assign\merge_demand_matrices.s +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\merge_auto_matrices.s +REM "%TPP_PATH%\runtpp" CTRAMP\scripts\assign\merge_demand_matrices.s if ERRORLEVEL 2 goto done :: ------------------------------------------------------------------------------------------------------ @@ -444,31 +438,31 @@ if ERRORLEVEL 2 goto done :nonres :: Build the internal/external demand matrices forecast -runtpp CTRAMP\scripts\nonres\IxForecasts.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\IxForecasts.job if ERRORLEVEL 2 goto done :: Apply diurnal factors to the fixed internal/external demand matrices -runtpp CTRAMP\scripts\nonres\IxTimeOfDay.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\IxTimeOfDay.job if ERRORLEVEL 2 goto done :: Apply a value toll choice model for the internal/external demand -runtpp CTRAMP\scripts\nonres\IxTollChoice.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\IxTollChoice.job if ERRORLEVEL 2 goto done :: Apply the commercial vehicle generation models -runtpp CTRAMP\scripts\nonres\TruckTripGeneration.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\TruckTripGeneration.job if ERRORLEVEL 2 goto done :: Apply the commercial vehicle distribution models -runtpp CTRAMP\scripts\nonres\TruckTripDistribution.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\TruckTripDistribution.job if ERRORLEVEL 2 goto done :: Apply the commercial vehicle diurnal factors -runtpp CTRAMP\scripts\nonres\TruckTimeOfDay.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\TruckTimeOfDay.job if ERRORLEVEL 2 goto done :: Apply a value toll choice model for eligible commercial demand -runtpp CTRAMP\scripts\nonres\TruckTollChoice.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\nonres\TruckTollChoice.job if ERRORLEVEL 2 goto done :hwyasgn @@ -480,32 +474,32 @@ if ERRORLEVEL 2 goto done :: ------------------------------------------------------------------------------------------------------ :mazasgn -runtpp 
CTRAMP\scripts\assign\build_and_assign_maz_to_maz_auto.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\build_and_assign_maz_to_maz_auto.job if ERRORLEVEL 2 goto done :tazasgn -runtpp CTRAMP\scripts\assign\HwyAssign.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\HwyAssign.job if ERRORLEVEL 2 goto done -runtpp CTRAMP\scripts\assign\AverageNetworkVolumes.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\AverageNetworkVolumes.job if ERRORLEVEL 2 goto done -runtpp CTRAMP\scripts\assign\CalculateAverageSpeed.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\CalculateAverageSpeed.job if ERRORLEVEL 2 goto done -runtpp CTRAMP\scripts\assign\MergeNetworks.job +"%TPP_PATH%\runtpp" CTRAMP\scripts\assign\MergeNetworks.job if ERRORLEVEL 2 goto done :: If another iteration is to be run, run hwy skims IF %ITERATION% LSS %MAX_ITERATION% ( - runtpp %BASE_SCRIPTS%\skims\HwySkims.job + "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\HwySkims.job if ERRORLEVEL 2 goto done ) -runtpp %BASE_SCRIPTS%\skims\BuildTransitNetworks.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\BuildTransitNetworks.job if ERRORLEVEL 2 goto done -runtpp %BASE_SCRIPTS%\skims\TransitSkimsPrep.job +"%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\TransitSkimsPrep.job if ERRORLEVEL 2 goto done :emmeseconditeration @@ -533,7 +527,7 @@ SET /A INNER_ITERATION=0 SET /A INNER_ITERATION+=1 :: no longer needed - REM runtpp CTRAMP\scripts\assign\merge_transit_matrices.s + REM "%TPP_PATH%\runtpp" CTRAMP\scripts\assign\merge_transit_matrices.s REM if ERRORLEVEL 2 goto done :innerskim @@ -549,10 +543,10 @@ SET /A INNER_ITERATION+=1 :afterinnerskim :: Run Transit Assignment - REM runtpp CTRAMP\scripts\assign\TransitAssign.job + REM "%TPP_PATH%\runtpp" CTRAMP\scripts\assign\TransitAssign.job REM if ERRORLEVEL 2 goto done - REM runtpp %BASE_SCRIPTS%\skims\SkimSetsAdjustment.job + REM "%TPP_PATH%\runtpp" %BASE_SCRIPTS%\skims\SkimSetsAdjustment.job REM if ERRORLEVEL 2 goto done :: Start Matrix Server remotely or locally diff --git a/model-files/SetUpModel.bat b/model-files/SetUpModel.bat index 5d8970ad..5e8f8ba4 100644 --- a/model-files/SetUpModel.bat +++ b/model-files/SetUpModel.bat @@ -7,6 +7,10 @@ :: set ENVTYPE=MTC or RSG set ENVTYPE=MTC +:: set RUNTYPE=LOCAL to run everything on this machine +:: set RUNTYPE=DISTRIBUTED to farm out work to other nodes +set RUNTYPE=LOCAL + :: ------------------------------ :: Step 1: Specify file locations :: ------------------------------ @@ -53,8 +57,8 @@ c:\windows\system32\Robocopy.exe /E "%INPUT_NETWORK%\hwy" INPUT\hwy c:\windows\system32\Robocopy.exe /E "%INPUT_NETWORK%\trn" INPUT\trn :: popsyn and land use input -c:\windows\system32\Robocopy.exe /E "%INPUT_LU%" INPUT\popsyn -c:\windows\system32\Robocopy.exe /E "%INPUT_POPSYN%" INPUT\landuse +c:\windows\system32\Robocopy.exe /E "%INPUT_LU%" INPUT\landuse +c:\windows\system32\Robocopy.exe /E "%INPUT_POPSYN%" INPUT\popsyn :: non residential input c:\windows\system32\Robocopy.exe /E "%INPUT_NONRES%" INPUT\nonres diff --git a/model-files/runtime/CTRampEnv.bat b/model-files/runtime/CTRampEnv.bat index 7cc2d3e4..20797ffb 100644 --- a/model-files/runtime/CTRampEnv.bat +++ b/model-files/runtime/CTRampEnv.bat @@ -1,28 +1,41 @@ rem this file has environment variables for CT-RAMP batch files :: The location of the 64-bit java development kit or runtime environment -set JAVA_PATH="C:\Program Files\Java\jre1.8.0_261" +IF %ENVTYPE%==RSG ( + set JAVA_PATH="C:\Program Files\Java\jre1.8.0_261" +) ELSE ( + set JAVA_PATH=C:\Program Files\Java\jre1.8.0_301 +) -:: The location of the RUNTPP executable from
Citilabs +:: The location of the RUNTPP, CLUSTER, VOYAGER executables from Citilabs set TPP_PATH=C:\Program Files\Citilabs\CubeVoyager -:: The location of the Cube executable from Citilabs -set CUBE_PATH=C:\Program Files (x86)\Citilabs\Cube +:: The location of the Cube executable from Citilabs (what is this used for?) +set CUBE_PATH=C:\Program Files\Citilabs\Cube + +:: The name of the conda python environment to use +:: Should have Emme packages installed, plus... +set TM2_PYTHON_CONDA_ENV=tm2_transit_ccr -:: Location of Emme python executable -::SET EMME_PYTHON_PATH="C:\Program Files\INRO\Emme\Emme 4\Emme-4.4.2\Python27" -REM SET EMMEPATH=C:\Program Files\INRO\Emme\Emme 4\Emme-test-4.4.5-v1 -SET EMMEPATH=C:\Program Files\INRO\Emme\Emme 4\Emme-4.4.5.1 -SET EMME_PYTHON_PATH="C:\Program Files\INRO\Emme\Emme 4\Emme-4.4.5.1\Python27" -:: BEWARE path issues with other python installs -path=%EMMEPATH%\programs;%EMMEPATH%\Python27;%EMMEPATH%\Python27\Scripts\;%PATH% -SET NUMBER_OF_PROCESSORS=56 +:: Location of Emme installation +IF %ENVTYPE%==RSG ( + SET EMME_PATH=C:\Program Files\INRO\Emme\Emme 4\Emme-4.4.5.1 +) ELSE ( + SET EMME_PATH=C:\Program Files\INRO\Emme\Emme 4\Emme-4.6.0 +) +IF %ENVTYPE%==RSG ( + SET NUMBER_OF_PROCESSORS=56 +) ELSE ( + set NUMBER_OF_PROCESSORS=24 +) :: The location of the Python executable -- for MTC, use the %TM2_PYTHON_CONDA_ENV% python -:: The location of the Python executable -:: set PYTHON_PATH=C:\Program Files\anaconda2 -set PYTHON_PATH=D:\Anaconda2 -REM set PYTHON_PATH=D:\Anaconda3 +IF %ENVTYPE%==RSG ( + set PYTHON_PATH=D:\Anaconda2 +) ELSE ( + set PYTHON_PATH=C:\Users\%USERNAME%\.conda\envs\%TM2_PYTHON_CONDA_ENV% +) :: The location of the main JAR file set RUNTIME=CTRAMP/runtime @@ -31,12 +44,14 @@ set JAVA_32_PORT=1190 set MATRIX_MANAGER_PORT=1191 set HH_MANAGER_PORT=1129 +:: TODO: where is this used? rem set machine names SET MAIN=WRJMDLPPW08 rem SET MTC01=W-AMPDX-D-SAG01 SET MTC02=WRJMDLPPW08 rem SET MTC03=W-AMPDX-D-SAG10 +:: TODO: Where is this used?
rem SET node_runner_MAIN=runMtc04 rem SET node_runner_MTC01=runMtc01 SET node_runner_MTC02=runMtc02 @@ -54,16 +69,24 @@ SET HOST_IP_ADDRESS=%IPADDRESS% set HHMGR_IP=10.0.1.46 :: Machine running matrix data manager -SET MATRIX_SERVER=\\%MTC02% -SET MATRIX_SERVER_BASE_DIR=%MATRIX_SERVER%\e$\projects\clients\MTC\%SCEN% -SET MATRIX_SERVER_ABSOLUTE_BASE_DIR=e:\projects\clients\MTC\%SCEN% -SET MATRIX_SERVER_JAVA_PATH=C:\Program Files\Java\jre1.8.0_261 +IF %ENVTYPE%==RSG ( + SET MATRIX_SERVER=\\%MTC02% + SET MATRIX_SERVER_BASE_DIR=\\%MTC02%\e$\projects\clients\MTC\%SCEN% + SET MATRIX_SERVER_ABSOLUTE_BASE_DIR=e:\projects\clients\MTC\%SCEN% + SET MATRIX_SERVER_JAVA_PATH=C:\Program Files\Java\jre1.8.0_261 +) ELSE ( + set MATRIX_SERVER=localhost +) :: Machine running household data manager -SET HH_SERVER=\\%MTC02% -SET HH_SERVER_BASE_DIR=%HH_SERVER%\e$\projects\clients\MTC\%SCEN% -SET HH_SERVER_ABSOLUTE_BASE_DIR=e:\projects\clients\MTC\%SCEN% -SET HH_SERVER_JAVA_PATH=C:\Program Files\Java\jre1.8.0_261 +IF %ENVTYPE%==RSG ( + SET HH_SERVER=\\%MTC02% + SET HH_SERVER_BASE_DIR=\\%MTC02%\e$\projects\clients\MTC\%SCEN% + SET HH_SERVER_ABSOLUTE_BASE_DIR=e:\projects\clients\MTC\%SCEN% + SET HH_SERVER_JAVA_PATH=C:\Program Files\Java\jre1.8.0_261 +) ELSE ( + set HH_SERVER=localhost +) rem set main property file name set PROPERTIES_NAME=sandag_abm @@ -78,5 +101,6 @@ rem location of mapThenRun.bat on remote machines set MAPANDRUN=CTRAMP\runtime\mapThenRunNew.bat rem account settings for remote access using psexec -SET USERNAME=redacted -SET PASSWORD=redacted +rem USERNAME is a system variable; DO NOT override this! +rem SET USERNAME=redacted +rem SET PASSWORD=redacted diff --git a/model-files/scripts/assign/taz_matrix_transfer.py b/model-files/scripts/assign/taz_matrix_transfer.py index df9dc533..619f5e35 100644 --- a/model-files/scripts/assign/taz_matrix_transfer.py +++ b/model-files/scripts/assign/taz_matrix_transfer.py @@ -37,7 +37,7 @@ #transfer the zone numberings for i in range(len(in_files)): with open(os.path.join(base_dir,in_files[i])) as f: - with open(os.path.join(base_dir,out_files[i]),'wb') as of: + with open(os.path.join(base_dir,out_files[i]),'w') as of: first = True for line in f: line = line.strip() diff --git a/model-files/scripts/nonres/BuildAirPax.job b/model-files/scripts/nonres/BuildAirPax.job index d2e94ab8..ad5e4092 100644 --- a/model-files/scripts/nonres/BuildAirPax.job +++ b/model-files/scripts/nonres/BuildAirPax.job @@ -66,7 +66,7 @@ ;start cluster nodes -*Cluster.exe MTC_AIRPAX 1-12 start exit +*"%TPP_PATH%\Cluster.exe" MTC_AIRPAX 1-12 start exit token_taz_count = %TAZ_COUNT% @@ -511,4 +511,4 @@ endrun *DEL nonres\2035*.mtx ;stop cluster nodes -*Cluster.exe MTC_AIRPAX 1-12 close exit +*"%TPP_PATH%\Cluster.exe" MTC_AIRPAX 1-12 close exit diff --git a/model-files/scripts/preprocess/BuildTazNetworks.job b/model-files/scripts/preprocess/BuildTazNetworks.job index d7d589f0..27399735 100644 --- a/model-files/scripts/preprocess/BuildTazNetworks.job +++ b/model-files/scripts/preprocess/BuildTazNetworks.job @@ -17,7 +17,7 @@ ; ---------------------------------------------------------------------------------------------------------------- ;start cluster nodes -*Cluster.exe MTC_HWYNET 1-5 start exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYNET 1-5 start exit ; do, more or less, the same skim procedure for each of the five time periods loop period = 1, 5 @@ -166,4 +166,4 @@ endloop ; token_period Wait4Files files = MTC_HWYNET1.script.end, MTC_HWYNET2.script.end, MTC_HWYNET3.script.end,
MTC_HWYNET4.script.end, MTC_HWYNET5.script.end, printfiles = merge, deldistribfiles = t, CheckReturnCode = t ;stop cluster nodes -*Cluster.exe MTC_HWYNET 1-5 close exit \ No newline at end of file +*"%TPP_PATH%\Cluster.exe" MTC_HWYNET 1-5 close exit \ No newline at end of file diff --git a/model-files/scripts/preprocess/codeLinkAreaType.py b/model-files/scripts/preprocess/codeLinkAreaType.py index 67c488c7..b123b89f 100644 --- a/model-files/scripts/preprocess/codeLinkAreaType.py +++ b/model-files/scripts/preprocess/codeLinkAreaType.py @@ -15,9 +15,9 @@ AREA_TYPE_FILE = os.path.join(model_run_dir,r'hwy\link_area_type.csv') BUFF_DIST = 5280 * 0.5 -print "Reading MAZ data" +print("Reading MAZ data") mazData = [] -with open(MAZ_DATA_FILE, 'rb') as csvfile: +with open(MAZ_DATA_FILE, 'r') as csvfile: mazreader = csv.reader(csvfile, skipinitialspace=True) for row in mazreader: mazData.append(row) @@ -36,11 +36,11 @@ orig_maz_id = row[mazDataColNames.index("MAZ_ORIGINAL")] origMazToSeqMaz[orig_maz_id] = maz -print "Reading nodes" +print("Reading nodes") mazs = dict() nodes = dict() spIndexMaz = index.Index() -with open(NODE_CSV_FILE,'rb') as node_file: +with open(NODE_CSV_FILE,'r') as node_file: node_reader = csv.reader(node_file,skipinitialspace=True) for row in node_reader: n = row[0] @@ -52,7 +52,7 @@ spIndexMaz.insert(int(origMazToSeqMaz[n]), (xCoord, yCoord, xCoord, yCoord)) nodes[n] = [n, xCoord, yCoord] -print "Calculate buffered MAZ measures" +print("Calculate buffered MAZ measures") for k in mazLandUse.keys(): #get maz data @@ -95,10 +95,10 @@ else: mazLandUse[k][6] = 0 #regional core -print "Find nearest MAZ for each link, take min area type of A or B node" +print("Find nearest MAZ for each link, take min area type of A or B node") lines = ["A,B,AREATYPE" + os.linesep] -with open(LINK_CSV_FILE,'rb') as link_file: +with open(LINK_CSV_FILE,'r') as link_file: link_reader = csv.reader(link_file,skipinitialspace=True) for row in link_reader: a = int(row[0]) @@ -123,7 +123,7 @@ lines.append("%i,%i,%i%s" % (a, b, linkAT, os.linesep)) #create output file -print "Write link area type CSV file" -outFile = open(AREA_TYPE_FILE, "wb") +print("Write link area type CSV file") +outFile = open(AREA_TYPE_FILE, "w") outFile.writelines(lines) outFile.close() diff --git a/model-files/scripts/preprocess/createMazDensityFile.py b/model-files/scripts/preprocess/createMazDensityFile.py index 1ae5aa3f..c2538edc 100644 --- a/model-files/scripts/preprocess/createMazDensityFile.py +++ b/model-files/scripts/preprocess/createMazDensityFile.py @@ -23,7 +23,7 @@ # duDenBin Houseold density bin (1 through 3 where 3 is the highest) # landuse\maz_data_withDensity.csv: landuse\maz_data.csv joined with landuse\maz_density.csv on MAZ_ORIGINAL # -# Requires: Basic python 2.7.x, pandas +# Requires: python 2.7.x or python 3, pandas # # Import modules @@ -39,10 +39,10 @@ outMazData = "landuse\maz_data_withDensity.csv" start_time = datetime.datetime.now() -print inMazNodes -print inIntersectionNodes -print inMazData -print outDensityData +print(inMazNodes) +print(inIntersectionNodes) +print(inMazData) +print(outDensityData) # Open intersection file as pandas table intersections = pd.read_csv(inIntersectionNodes) @@ -87,7 +87,7 @@ intersections['distance'] = intersections.eval("((X-maz_x)**2 + (Y-maz_y)**2)**0.5") int_cnt.append(len(intersections[intersections.distance <= max_dist])) if((n % 1000) == 0): - print "Counting Intersections for MAZ ", maz_n, " : ", int_cnt[n] + print("Counting Intersections for MAZ {} : 
{}".format(maz_n,int_cnt[n])) n = n + 1 readMazNodeFile.close() @@ -113,7 +113,7 @@ maz_nonseqn = mazData['MAZ_ORIGINAL'].tolist() # create writer -writeMazDensityFile = open(outDensityData, "wb") +writeMazDensityFile = open(outDensityData, "w") writer = csv.writer(writeMazDensityFile, delimiter=',') outHeader = ["MAZ_ORIGINAL","TotInt","EmpDen","RetEmpDen","DUDen","PopDen","IntDenBin","EmpDenBin","DuDenBin","PopEmpDenPerMi"] writer.writerow(outHeader) @@ -147,7 +147,7 @@ mazData['dest_y'] = maz_y_seq[i] if((i ==0) or (i % 100) == 0): - print "Calculating Density Variables for MAZ ", origNonSeqMaz + print("Calculating Density Variables for MAZ {}".format(origNonSeqMaz)) #sum the variables for all mazs within the max distance @@ -222,4 +222,4 @@ end_time = datetime.datetime.now() duration = end_time - start_time -print "*** Finished in {} minutes ***".format(duration.total_seconds()/60.0) +print("*** Finished in {} minutes ***".format(duration.total_seconds()/60.0)) diff --git a/model-files/scripts/preprocess/csvToDbf.py b/model-files/scripts/preprocess/csvToDbf.py index 4fd7b287..114c8493 100644 --- a/model-files/scripts/preprocess/csvToDbf.py +++ b/model-files/scripts/preprocess/csvToDbf.py @@ -7,7 +7,7 @@ Try assuming ints, then floats, then strings. """ -from dbfpy import dbf +import dbfpy3 import argparse,collections,csv,os,sys if __name__ == '__main__': @@ -28,6 +28,8 @@ col_list = row for colname in row: dbf_colname = colname[:10] + # make it upper case + dbf_colname = dbf_colname.upper() if len(colname) > 10: print("Truncating column {} to {}", colname, dbf_colname) columns[colname] = [dbf_colname, "N", 10] # try int first continue @@ -60,11 +62,12 @@ print("Read {} and determined dbf columns".format(args.input_csv)) # create the dbf - new_dbf = dbf.Dbf(args.output_dbf, new=True) + new_dbf = dbfpy3.dbf.Dbf(args.output_dbf, new=True) for col in columns.keys(): - # print "{} : {}".format(col, columns[col]) - new_dbf.addField(columns[col]) + # print("{} : {}".format(col, columns[col])) + # dbfpy3 wants type_code, name, length + new_dbf.add_field( (columns[col][1], columns[col][0], columns[col][2]) ) csvfile = open(args.input_csv) csvreader = csv.reader(csvfile) @@ -75,19 +78,21 @@ header = True continue - rec = new_dbf.newRecord() + rec = new_dbf.new() for col_idx in range(len(row)): colname = col_list[col_idx] + dbf_colname = columns[colname][0] + print(dbf_colname) if columns[colname][1] == "N" and len(columns[colname]) == 3: - rec[ columns[colname][0] ] = int(row[col_idx]) + rec[ dbf_colname ] = int(row[col_idx]) elif columns[colname][1] == "N": - rec[ columns[colname][0] ] = float(row[col_idx]) + rec[ dbf_colname] = float(row[col_idx]) else: - rec[ columns[colname][0] ] = row[col_idx] - rec.store() + rec[ dbf_colname ] = row[col_idx] + new_dbf.write(rec) csvfile.close() - print new_dbf + print(new_dbf) new_dbf.close() print("Wrote {}".format(args.output_dbf)) diff --git a/model-files/scripts/preprocess/interchange_distance.py b/model-files/scripts/preprocess/interchange_distance.py deleted file mode 100644 index 882b30fe..00000000 --- a/model-files/scripts/preprocess/interchange_distance.py +++ /dev/null @@ -1,80 +0,0 @@ -# Calculate upstream and downstream interchange distance - -# Interchange nodes are labeled in Cube script -# This script looks through links to determine shortest paths to interchanges -# from SANDAG ABM: import_network.py - -import heapq as _heapq -import csv -import os -import pandas as pd -import argparse - -def csvImport(csvfile): - # read from CSV - df = 
pd.read_csv(csvfile, sep=',',dtype={'A': int,'B': int,'FT': int,'DISTANCE': float,'INTXNODE': str,'INTXLOC': str}) - return df - -def csvExport(df,csvfile): - # write data back to CSV - df.to_csv(csvfile, index=False) - -def interchange_distance(orig_link, direction): - visited = set([]) - visited_add = visited.add - back_links = {} - heap = [] - if direction == "DOWNSTREAM": - #get_links = lambda l: l.j_node.outgoing_links() - #get_links = lambda l: links.loc[(links.A == l.B) | ((links.B == l.B) & (links.A != l.A))] - # Adds link distances until reaching a node tagged BEFORE/AFTER interchange. - # These are currently set for all freeway ramps by locating nodes where incoming links != outgoing links - # TODO update nodes to only tag major interchanges - get_links = lambda l: links.loc[(links.A == l.B)] - check_far_node = lambda l: True if l.INTXB == "BEFORE" or l.INTXB == "AFTER" else False - elif direction == "UPSTREAM": - #get_links = lambda l: l.i_node.incoming_links() - #get_links = lambda l: links.loc[((links.A == l.A) & (links.B != l.B)) | (links.B == l.A)] - # TODO update nodes to only tag major interchanges - get_links = lambda l: links.loc[(links.B == l.A)] - check_far_node = lambda l: True if l.INTXA == "BEFORE" or l.INTXA == "AFTER" else False - # Shortest path search for nearest interchange node along freeway - for link in get_links(orig_link).itertuples(): - _heapq.heappush(heap, (link.DISTANCE, link)) - interchange_found = False - try: - while not interchange_found: - link_cost, link = _heapq.heappop(heap) - if link in visited: - continue - visited_add(link) - #pdb.set_trace() - if check_far_node(link): - interchange_found = True - break - for next_link in get_links(link).itertuples(): - if next_link in visited: - continue - next_cost = link_cost + link.DISTANCE - _heapq.heappush(heap, (next_cost, next_link)) - except IndexError: - # IndexError if heap is empty - # case where start / end of highway, dist = 99 - return 99 - return (orig_link.DISTANCE / 2.0) + link_cost - -parser = argparse.ArgumentParser(description="Find nearest freeway interchanges", formatter_class=argparse.RawDescriptionHelpFormatter,) -parser.add_argument("input_csv", metavar="input.csv", help="Input csv link table") -parser.add_argument("output_csv", metavar="output.csv", help="Output csv file") - -args = parser.parse_args() - -links = csvImport(args.input_csv) -links["downdist"] = None -links["updist"] = None - -for i, link in links.iterrows(): - links.at[i,"downdist"] = interchange_distance(link, "DOWNSTREAM") - links.at[i,"updist"] = interchange_distance(link, "UPSTREAM") - -csvExport(links,args.output_csv) diff --git a/model-files/scripts/preprocess/renumber.py b/model-files/scripts/preprocess/renumber.py index 991bb9e5..c4b09128 100644 --- a/model-files/scripts/preprocess/renumber.py +++ b/model-files/scripts/preprocess/renumber.py @@ -102,8 +102,8 @@ data_df[output_new_col] = new_col else: pass - except Exception, e: - print e + except Exception as e: + print(e) print(data_df.head()) diff --git a/model-files/scripts/preprocess/tap_data_builder.py b/model-files/scripts/preprocess/tap_data_builder.py index 331b847f..f918c391 100644 --- a/model-files/scripts/preprocess/tap_data_builder.py +++ b/model-files/scripts/preprocess/tap_data_builder.py @@ -36,11 +36,11 @@ infile = os.path.join(base_dir,'hwy', 'tap_to_taz_for_parking.txt') outfile = os.path.join(base_dir,'hwy', 'tap_data.csv') - sequence_mapping = pandas.DataFrame.from_csv(zone_seq_mapping_file) + sequence_mapping = 
pandas.read_csv(zone_seq_mapping_file) sequence_mapping.reset_index(inplace=True) - tap_data = pandas.read_table(infile, names=['TAP_original','TAZ_original','TAZ2','SP_DISTANCE','FEET'], - delimiter=',') + tap_data = pandas.read_csv(infile, names=['TAP_original','TAZ_original','TAZ2','SP_DISTANCE','FEET'], + delimiter=',') tap_data_grouped = tap_data.groupby('TAP_original') tap_data_out_init = False @@ -56,7 +56,7 @@ tap_data_out_init = True except KeyError: - print 'tap %8d not captured in tap->taz (for parking) script' % row['N'] + print('tap {} not captured in tap->taz (for parking) script'.format(row['N'])) # use the last one -- does this make sense? use_this = tap_data_out.tail(1).copy() use_this.loc[:,'TAP_original'] = row['N'] @@ -84,4 +84,4 @@ # reorder and write tap_data_out = tap_data_out[['TAP','TAP_original','lotid','TAZ','capacity']] tap_data_out.to_csv(outfile, index=False) - print "Wrote %s" % outfile \ No newline at end of file + print("Wrote {}".format(outfile)) diff --git a/model-files/scripts/preprocess/zone_seq_disseminator.py b/model-files/scripts/preprocess/zone_seq_disseminator.py index e4d30d58..7684d637 100644 --- a/model-files/scripts/preprocess/zone_seq_disseminator.py +++ b/model-files/scripts/preprocess/zone_seq_disseminator.py @@ -51,7 +51,7 @@ def map_data(filename, sequence_mapping, mapping_dict): dframe = pandas.read_csv(filename) dframe.reset_index(inplace=True) - for mapkey, mapdef in mapping_dict.iteritems(): + for (mapkey, mapdef) in mapping_dict.items(): # delete mapkey if it's already there if mapkey in list(dframe.columns.values): dframe.drop(mapkey, axis=1, inplace=True) @@ -71,7 +71,7 @@ def map_data(filename, sequence_mapping, mapping_dict): # write it dframe.to_csv(filename, index=False, float_format="%.9f") - print "Wrote %s" % filename + print("Wrote {}".format(filename)) return dframe if __name__ == '__main__': @@ -112,7 +112,7 @@ def map_data(filename, sequence_mapping, mapping_dict): parkarea.rename(columns={'MAZ':'mgra'}, inplace=True) parkarea = parkarea[['a','mgra','parkarea']] parkarea.to_csv(park_location_alts_file, index=False) - print "Wrote %s" % park_location_alts_file + print("Wrote {}".format(park_location_alts_file)) ######### dc alternatives ? dcalts = maz_data[['MAZ','TAZ']] @@ -122,16 +122,16 @@ def map_data(filename, sequence_mapping, mapping_dict): dcalts.rename(columns={'MAZ':'mgra', 'TAZ':'dest'}, inplace=True) dcalts = dcalts[['a','mgra','dest']] dcalts.to_csv(dc_alts_file, index=False) - print "Wrote %s" % dc_alts_file + print("Wrote {}".format(dc_alts_file)) ######### these seem truly pointless dcalts.drop('dest', axis=1, inplace=True) dcalts.to_csv(parking_soa_alts_file, index=False) - print "Wrote %s" % parking_soa_alts_file + print("Wrote {}".format(parking_soa_alts_file)) soa_dist_alts = taz_data[['TAZ']] soa_dist_alts.loc[:,'a'] = range(1, soa_dist_alts.shape[0] + 1) # ??? 
soa_dist_alts.rename(columns={'TAZ':'dest'}, inplace=True) soa_dist_alts = soa_dist_alts[['a','dest']] soa_dist_alts.to_csv(soa_dist_alts_file, index=False) - print "Wrote %s" % soa_dist_alts_file \ No newline at end of file + print("Wrote {}".format(soa_dist_alts_file)) diff --git a/model-files/scripts/skims/BuildTransitNetworks.job b/model-files/scripts/skims/BuildTransitNetworks.job index 8ea4fe38..14e294a7 100644 --- a/model-files/scripts/skims/BuildTransitNetworks.job +++ b/model-files/scripts/skims/BuildTransitNetworks.job @@ -180,7 +180,7 @@ ENDRUN *del hwy\mtc_transit_network_temp3.net ;start cluster nodes -*Cluster.exe MTC_TRANNET 1-5 start exit +*"%TPP_PATH%\Cluster.exe" MTC_TRANNET 1-5 start exit ;now build transit times based on congested times from loaded network ;loop over time period @@ -239,4 +239,4 @@ Wait4Files files = MTC_TRANNET1.script.end, MTC_TRANNET2.script.end, MTC_TRANNET ;stop cluster nodes -*Cluster.exe MTC_TRANNET 1-5 close exit +*"%TPP_PATH%\Cluster.exe" MTC_TRANNET 1-5 close exit diff --git a/model-files/scripts/skims/HwySkims.job b/model-files/scripts/skims/HwySkims.job index 44b95a84..8ec08393 100644 --- a/model-files/scripts/skims/HwySkims.job +++ b/model-files/scripts/skims/HwySkims.job @@ -135,7 +135,7 @@ read file = %BASE_SCRIPTS%\block\hwyparam.block hwy_block_param = '%BASE_SCRIPTS%\block\hwyparam.block' ;start cluster nodes -*Cluster.exe MTC_HWYSKIM 1-30 start exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYSKIM 1-30 start exit ;first, do taz-taz skims @@ -647,4 +647,4 @@ Wait4Files files = MTC_HWYSKIM1.script.end, MTC_HWYSKIM2.script.end, MTC_HWYSKIM ;stop cluster nodes -*Cluster.exe MTC_HWYSKIM 1-30 close exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYSKIM 1-30 close exit diff --git a/model-files/scripts/skims/MazMazSkims.job b/model-files/scripts/skims/MazMazSkims.job index ac29a19a..804eae40 100644 --- a/model-files/scripts/skims/MazMazSkims.job +++ b/model-files/scripts/skims/MazMazSkims.job @@ -160,7 +160,7 @@ LOOP CLASS = 1,1 ENDLOOP ;start cluster nodes -*Cluster.exe MTC_HWYMAZSKIM 1-9 start exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYMAZSKIM 1-9 start exit ;loop by county, but just use free flow speed LOOP PROC_COUNT=1,9 @@ -268,7 +268,7 @@ LOOP PROC_COUNT=1,9 ENDRUN ;run script - *Voyager.exe @SFILE@_@SKIM_COUNTY@.s /Start + *"%TPP_PATH%\Voyager.exe" @SFILE@_@SKIM_COUNTY@.s /Start ;clean-up *del @SFILE@_@SKIM_COUNTY@.s @@ -289,7 +289,7 @@ Wait4Files Files=MTC_HWYMAZSKIM1.script.end, MTC_HWYMAZSKIM2.script.end, MTC_HWY checkreturncode=T, printfiles=MERGE, deldistribfiles=T ;stop cluster nodes -*Cluster.exe MTC_HWYMAZSKIM 1-9 close exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYMAZSKIM 1-9 close exit ;merge files diff --git a/model-files/scripts/skims/NonMotorizedSkims.job b/model-files/scripts/skims/NonMotorizedSkims.job index 41cdeb4e..6a744080 100644 --- a/model-files/scripts/skims/NonMotorizedSkims.job +++ b/model-files/scripts/skims/NonMotorizedSkims.job @@ -115,7 +115,7 @@ tap_mn = '(' + county_selector_mn + ' & ' + tap_selector + ')' ;start cluster nodes -*"C:\Program Files (x86)\Citilabs\Cube\Cluster.exe" MTC_NONMOT 1-9 start exit +*"%TPP_PATH%\Cluster.exe" MTC_NONMOT 1-9 start exit SLEEP TIME = 10 @@ -251,7 +251,7 @@ LOOP PROC_COUNT=1,9 ENDRUN ;run script - *Voyager.exe @SFILE@_@SKIM_COUNTY@.s /Start + *"%TPP_PATH%\Voyager.exe" @SFILE@_@SKIM_COUNTY@.s /Start ;clean-up ; *del @SFILE@_@SKIM_COUNTY@.s @@ -272,7 +272,7 @@ Wait4Files Files=MTC_NONMOT1.script.end, MTC_NONMOT2.script.end, MTC_NONMOT3.scr checkreturncode=T, printfiles=MERGE, deldistribfiles=T ;stop 
cluster nodes -*Cluster.exe MTC_NONMOT 1-9 close exit +*"%TPP_PATH%\Cluster.exe" MTC_NONMOT 1-9 close exit ;merge files *copy skims\ped_distance_maz_maz_1.txt+skims\ped_distance_maz_maz_2.txt+skims\ped_distance_maz_maz_3.txt+skims\ped_distance_maz_maz_4.txt+skims\ped_distance_maz_maz_5.txt+skims\ped_distance_maz_maz_6.txt+skims\ped_distance_maz_maz_7.txt+skims\ped_distance_maz_maz_8.txt+skims\ped_distance_maz_maz_9.txt skims\ped_distance_maz_maz.txt diff --git a/model-files/scripts/skims/TransitSkimsPrep.job b/model-files/scripts/skims/TransitSkimsPrep.job index 900897f9..4187ddca 100644 --- a/model-files/scripts/skims/TransitSkimsPrep.job +++ b/model-files/scripts/skims/TransitSkimsPrep.job @@ -92,7 +92,7 @@ SKIM_MATRIX_DEF = '%BASE_SCRIPTS%\block\skim_matrix_definitions.block' ;first, build transit drive access skims ;export highway skims as csv files -*Cluster.exe MTC_HWYSKIM 1-5 start exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYSKIM 1-5 start exit loop period = 1,5 @@ -125,7 +125,7 @@ ENDLOOP Wait4Files files = MTC_HWYSKIM1.script.end, MTC_HWYSKIM2.script.end, MTC_HWYSKIM3.script.end, MTC_HWYSKIM4.script.end, MTC_HWYSKIM5.script.end, printfiles = merge, deldistribfiles = t, CheckReturnCode = t -*Cluster.exe MTC_HWYSKIM 1-5 close exit +*"%TPP_PATH%\Cluster.exe" MTC_HWYSKIM 1-5 close exit ;export taps connectors from network RUN PGM=NETWORK @@ -150,18 +150,17 @@ RUN PGM=NETWORK ENDPHASE ENDRUN -;call python program to build transit drive access skims -*python %BASE_SCRIPTS%\skims\build_drive_access_skims.py . %BASE_SCRIPTS%\block - +; build transit drive access skims +*"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\build_drive_access_skims.py . %BASE_SCRIPTS%\block IF (RETURNCODE > 0) abort ;*del skims\DA_*_taz_time.csv ; run script used to help consolidate taps -*python %BASE_SCRIPTS%\skims\tap_lines.py +*"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\tap_lines.py IF (RETURNCODE > 0) abort ;rebuild transit line file with new numbers -*python %BASE_SCRIPTS%\skims\build_new_transit_line.py "trn\transitLines.lin" "trn\transitLines_new_nodes.lin" "hwy\mtc_transit_network_tap_to_node.txt" +*"%PYTHON_PATH%\python" %BASE_SCRIPTS%\skims\build_new_transit_line.py "trn\transitLines.lin" "trn\transitLines_new_nodes.lin" "hwy\mtc_transit_network_tap_to_node.txt" IF (RETURNCODE > 0) abort ;now build transit times based on congested times from loaded network diff --git a/model-files/scripts/skims/apply_fares.py b/model-files/scripts/skims/apply_fares.py index 30d51786..73e87693 100644 --- a/model-files/scripts/skims/apply_fares.py +++ b/model-files/scripts/skims/apply_fares.py @@ -81,7 +81,7 @@ def execute(self): line.segment(-1).allow_boardings = False self._log.append({"type": "header", "content": "Base fares by faresystem"}) - for fs_id, fs_data in faresystems.iteritems(): + for (fs_id, fs_data) in faresystems.items(): self._log.append( {"type": "text", "content": "FAREZONE {}: {} {}".format(fs_id, fs_data["STRUCTURE"], fs_data["NAME"])}) lines = fs_data["LINES"] @@ -114,14 +114,14 @@ def execute(self): except Exception as error: self._log.append({"type": "text", "content": "error during apply fares"}) - self._log.append({"type": "text", "content": unicode(error)}) + self._log.append({"type": "text", "content": str(error)}) self._log.append({"type": "text", "content": _traceback.format_exc()}) raise finally: log_content = [] header = ["NUMBER", "NAME", "NUM LINES", "NUM SEGMENTS", "MODES", "FAREMATRIX ID", "NUM ZONES", "NUM MATRIX RECORDS"] - for fs_id, fs_data in faresystems.iteritems(): + for (fs_id, 
fs_data) in faresystems.items(): log_content.append([str(fs_data.get(h, "")) for h in header]) self._log.insert(0, { "content": log_content, @@ -241,7 +241,7 @@ def generate_fromto_approx(self, network, lines, fare_matrix, fs_data): zone_nodes[farezone].add(seg.i_node) self._log.append( {"type": "text2", "content": "Farezone IDs and node count: %s" % ( - ", ".join(["%s: %s" % (k, len(v)) for k, v in zone_nodes.iteritems()]))}) + ", ".join(["%s: %s" % (k, len(v)) for (k, v) in zone_nodes.items()]))}) # Two cases: # - zone / area fares with boundary crossings, different FS may overlap: @@ -256,7 +256,7 @@ def generate_fromto_approx(self, network, lines, fare_matrix, fs_data): count_single_node_zones = 0.0 count_multi_node_zones = 0.0 - for zone, nodes in zone_nodes.iteritems(): + for (zone, nodes) in zone_nodes.items(): if len(nodes) > 1: count_multi_node_zones += 1.0 else: @@ -332,7 +332,7 @@ def zone_boundary_crossing_approx(self, lines, valid_farezones, fare_matrix, fs_ if board_cost is None: # use the smallest fare found from this farezone as best guess # as a reasonable boarding cost - board_cost = min(fare_matrix[farezone].itervalues()) + board_cost = min(fare_matrix[farezone].values()) self._log.append({ "type": "text3", "content": farezone_warning3 % ( @@ -491,7 +491,7 @@ def faresystem_distances(self, faresystems, network): {"type": "header", "content": "Faresystem distances"}) self._log.append( {"type": "text2", "content": "Max transfer distance: %s" % MAX_TRANSFER_DISTANCE}) - for fs_index, fs_data in enumerate(faresystems.itervalues()): + for fs_index, fs_data in enumerate(faresystems.values()): stops = set([]) for line in fs_data["LINES"]: for stop in line.segments(True): @@ -504,10 +504,10 @@ def faresystem_distances(self, faresystems, network): # get distances between every pair of zone systems # determine transfer fares which are too far away to be used - for fs_id, fs_data in faresystems.iteritems(): + for (fs_id, fs_data) in faresystems.items(): fs_data["distance"] = [] fs_data["xfer_fares"] = xfer_fares = {} - for fs_id2, fs_data2 in faresystems.iteritems(): + for (fs_id2, fs_data2) in faresystems.items(): if fs_data["NUM LINES"] == 0 or fs_data2["NUM LINES"] == 0: distance = "n/a" elif fs_id == fs_id2: @@ -540,7 +540,7 @@ def faresystem_distances(self, faresystems, network): xfer_fares[fs_id2] = xfer distance_table = [["p/q"] + list(faresystems.keys())] - for fs, fs_data in faresystems.iteritems(): + for (fs, fs_data) in faresystems.items(): distance_table.append([fs] + [("%.0f" % d if isinstance(d, float) else d) for d in fs_data["distance"]]) self._log.append( {"type": "text2", "content": "Table of distance between stops in faresystems (feet)"}) @@ -562,7 +562,7 @@ def matching_xfer_fares(xfer_fares_list1, xfer_fares_list2): # first pass: only group by matching mode patterns to minimize the number # of levels with multiple modes group_xfer_fares_mode = [] - for fs_id, fs_data in faresystems.iteritems(): + for (fs_id, fs_data) in faresystems.items(): fs_modes = fs_data["MODE_SET"] if not fs_modes: continue @@ -648,7 +648,7 @@ def generate_transfer_fares(self, faresystems, faresystem_groups, network): line.vehicle = temp_veh network.delete_transit_vehicle(vehicle) new_veh = network.create_transit_vehicle(veh_id, meta_mode.id) - for a, v in attributes.iteritems(): + for (a, v) in attributes.items(): new_veh[a] = v for line in lines[veh_id]: line.vehicle = new_veh diff --git a/model-files/scripts/skims/build_drive_access_skims.py
b/model-files/scripts/skims/build_drive_access_skims.py index 955bc3ab..7a4b22ed 100644 --- a/model-files/scripts/skims/build_drive_access_skims.py +++ b/model-files/scripts/skims/build_drive_access_skims.py @@ -79,7 +79,7 @@ periods = ['EA','AM','MD','PM','EV'] -print 'reading node->taz/maz/tap sequence mapping' +print('reading node->taz/maz/tap sequence mapping') seq_mapping = {} tazseq_mapping = {} mazseq_mapping = {} @@ -100,7 +100,7 @@ seq_mapping[ int(data["N" ])] = int(data["EXTSEQ"]) extseq_mapping[int(data["EXTSEQ"])] = int(data["N" ]) -print 'reading maz->taz' +print('reading maz->taz') #read maz->taz mapping mazn_tazn_mapping = {} #maz,taz @@ -115,7 +115,7 @@ mazn_tazn_mapping[int(data[col_maz])] = int(data[col_taz]) #read param block -print 'reading hwy parameter block data' +print('reading hwy parameter block data') block_data = {} for line in open(hwy_parameter_block_file): line = line.strip() @@ -133,15 +133,22 @@ vot = 0.6 / block_data['VOT'] #turn into minutes / cents walk_rate = 60.0 / 3.0 / 5280.0 -print 'reading maz->tap skims and building tap->maz/taz lookup' +print('reading maz->tap skims and building tap->maz/taz lookup') #read maz->tap walk skims #build tap-> (closest) (maz,taz,maz->tap walk_time) tapn_tazn_lookup = {} tapns = {} for line in open(ped_maz_tap_distance_file): line = line.strip().split(',') - mazn = mazseq_mapping[int(line[0])] - tapn = tapseq_mapping[int(line[1])] + try: + mazn = mazseq_mapping[int(line[0])] + tapn = tapseq_mapping[int(line[1])] + except Exception as e: + if line==['']: continue # continue blank line + if line==['\x1a']: continue # continue on EOF -- for loop will end + print(e) + print("line={}".format(line)) + raise e distance = float(line[4]) walk_time = walk_rate*distance tapns[tapn] = None @@ -152,7 +159,7 @@ tapns.sort() -print 'reading transit lines from {}'.format(transit_line_file) +print('reading transit lines from {}'.format(transit_line_file)) #read transit lines to pull out tod and stop information stops_by_tod_and_mode = {} for period in periods: @@ -230,12 +237,12 @@ trn_line = "" -print 'building tap->mode' +print('building tap->mode') all_tapn = [] for line in open(network_tap_nodes_file): all_tapn.append(int(line)) -print 'building tod->mode->taps' +print('building tod->mode->taps') tod_mode_tapn = {} for period in periods: tod_mode_tapn[period] = {} @@ -252,7 +259,7 @@ stopn = a #stops_by_tod_and_mode[periods[i]][mode][n] if not tapn in all_tapn: - print 'tapn not found in (' + str(a) + ',' + str(b) + ')' + print('tapn not found in ({},{})'.format(a,b)) continue # mode = tapn_to_mode[tapn] for mode_id in id_mode_map: @@ -268,7 +275,7 @@ tapn ] # closest (mazn,tazn,walk_time from mazn to tapn,walk_distance from mazn to tapn) -print 'taps with no (apparent) walk access: ' + str(isolated_tapns.keys()) +print('taps with no (apparent) walk access: {}'.format(isolated_tapns.keys())) # ------------------------------------------ @@ -293,7 +300,7 @@ # (building separately because each time period has different skims and network) total_table_array = [] for period in periods: - print 'building drive access skims for period ' + period + print('building drive access skims for period {}'.format(period)) # grab taps serviced in this period and their tazs taps_for_period = tod_mode_tapn_df.loc[tod_mode_tapn_df['PERIOD'] == period] @@ -331,7 +338,7 @@ & (tod_mode_tapn_costs['DDIST'] < max_dist)] if len(mode_cut) == 0: - print "No taps within" + str(max_dist) + "miles for mode" + mode + print("No taps within {} miles for mode
{}".format(max_dist, mode)) continue # select entries for the closest N taps to this TAZ for this mode and this time of day @@ -359,9 +366,9 @@ # fill missing toll values final_df['DTOLL'] = final_df['DTOLL'].fillna(0) -print 'writing drive access skim results' +print('writing drive access skim results') output_col_order = ['FTAZ', 'MODE', 'PERIOD', 'TTAP', 'TMAZ', 'TTAZ', 'DTIME', 'DDIST', 'DTOLL', 'WDIST'] final_df[output_col_order].to_csv(drive_tansit_skim_out_file, index=False) end_time = pytime.time() -print 'elapsed time in minutes: ' + str((end_time - start_time) / 60.0) +print('elapsed time in minutes: {}'.format((end_time - start_time) / 60.0)) diff --git a/model-files/scripts/skims/build_new_transit_line.py b/model-files/scripts/skims/build_new_transit_line.py index e7fa606b..326a79fe 100644 --- a/model-files/scripts/skims/build_new_transit_line.py +++ b/model-files/scripts/skims/build_new_transit_line.py @@ -27,7 +27,7 @@ node_map[int(line["OLD_NODE"])] = int(line["N"]) #next, read in the transit lines, change the node, and write out the results -f = open(out_line_file,'wb') +f = open(out_line_file,'w') f.write(";;<><>;;"+os.linesep) trn_line = "" trn_line_count = 0 diff --git a/model-files/scripts/skims/build_walk_transfer_bypass_links.py b/model-files/scripts/skims/build_walk_transfer_bypass_links.py index 227174a6..4c291a0b 100644 --- a/model-files/scripts/skims/build_walk_transfer_bypass_links.py +++ b/model-files/scripts/skims/build_walk_transfer_bypass_links.py @@ -50,9 +50,8 @@ def distance(x1,y1,x2,y2): #use ext space for pseudo-taps (901,000+) - leave 1,000 spots for externals pseudo_tap_counter = 901001 -tap_numbers = taps.keys() -tap_numbers.sort() -with open(output_node_file,'wb') as f: +tap_numbers = sorted(taps.keys()) +with open(output_node_file,'w') as f: for tap in tap_numbers: (x,y) = taps[tap] #offset tap by 7 feet diagonally so that we are close, but not overlapping @@ -62,7 +61,7 @@ def distance(x1,y1,x2,y2): pseudo_tap_counter += 1 # write crosswalk between tap and newly created pseudo tap node -with open(output_tap_xwalk_file, 'wb') as f: +with open(output_tap_xwalk_file, 'w') as f: for tap, pseudo_tap in taps.items(): f.write(','.join(map(str,[tap,pseudo_tap])) + os.linesep) @@ -77,7 +76,7 @@ def distance(x1,y1,x2,y2): node_coord[n] = (x,y) -with open(output_link_file,'wb') as f: +with open(output_link_file,'w') as f: #first write out pseudo-tap->stop links for tap in tap_links: (x1,y1) = node_coord[tap] diff --git a/model-files/scripts/skims/build_walk_transfer_links.py b/model-files/scripts/skims/build_walk_transfer_links.py index 10820326..09f87e27 100644 --- a/model-files/scripts/skims/build_walk_transfer_links.py +++ b/model-files/scripts/skims/build_walk_transfer_links.py @@ -18,11 +18,19 @@ taps_connection = sys.argv[1] taps_connectors = sys.argv[2] -f = open(taps_connectors,'wb') +f = open(taps_connectors,'w') finished_taps = {} -for line in open(taps_connection): +for line in open(taps_connection, 'r'): line = line.strip().split(',') - taps = (int(line[0]),int(line[1])) + try: + taps = (int(line[0]),int(line[1])) + except Exception as e: + # for some reason this file has lots of blank lines -- workaround this + if line == ['']: continue + # handle EOF + if line == ['\x1a']: continue + print("line=[{}]".format(line)) + raise e if not taps in finished_taps: #tap,tap,cntype,distance #write both directions diff --git a/model-files/scripts/skims/change_link_node_numbers.py b/model-files/scripts/skims/change_link_node_numbers.py index 
66cc4551..5f9adf30 100644 --- a/model-files/scripts/skims/change_link_node_numbers.py +++ b/model-files/scripts/skims/change_link_node_numbers.py @@ -29,7 +29,7 @@ node_map[int(line["OLD_NODE"])] = int(line["N"]) #now read the link file, transfer the a/b nodes, and write the new file -f = open(out_link_file,'wb') +f = open(out_link_file,'w') for line in open(link_file): line = line.strip().split(',') if len(line) < 2: diff --git a/model-files/scripts/skims/create_emme_network.py b/model-files/scripts/skims/create_emme_network.py index a095d5d1..084c600f 100644 --- a/model-files/scripts/skims/create_emme_network.py +++ b/model-files/scripts/skims/create_emme_network.py @@ -5,7 +5,7 @@ In each scenario, a network is created in accordance with the Emme transaction files generated by the cube_to_emme_network_conversion.py script. -Usage: %EMME_PYTHON_PATH%\python create_emme_network.py +Usage: python create_emme_network.py Note that the Emme python must be used to have access to the Emme API [-p, --trn_path]: path to the trn folder, default is the @@ -105,7 +105,7 @@ def init_emme_project(root, title, port=59673, overwrite=False): except TcpConnectFailureError: # no running desktop pass _shutil.rmtree(emme_dir) - print "Creating Emme project folder with name: %s" % title + print("Creating Emme project folder with name: {}".format(title)) project_path = _app.create_project(root, title) desktop = _app.start( # will not close desktop when program ends project=project_path, user_initials="RSG", visible=True, port=port) @@ -141,6 +141,7 @@ def init_emme_project(root, title, port=59673, overwrite=False): } _os.mkdir(_join(project_root, "Database")) + print("Creating Emmebank with dimensions {}".format(dimensions)) emmebank = _eb.create(_join(project_root, "Database", "emmebank"), dimensions) emmebank.title = title emmebank.coord_unit_length = 0.000189394 # feet to miles @@ -159,10 +160,10 @@ def init_emme_project(root, title, port=59673, overwrite=False): desktop.data_explorer().add_database(emmebank.path) all_databases = desktop.data_explorer().databases() - print len(all_databases), "databases in project" + print(len(all_databases), "databases in project") # opening first database. There must be a better way to do this... 
for database in all_databases: - print database.name(), len(database.scenarios()) + print(database.name(), len(database.scenarios())) database.open() break project.save() @@ -179,7 +180,7 @@ def import_modes(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing modes" + print("importing modes from {}".format(emme_mode_transaction_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -201,7 +202,7 @@ def import_network(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing network" + print("importing network from {}".format(emme_network_transaction_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -223,7 +224,7 @@ def import_extra_node_attributes(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing node attributes" + print("importing node attributes from {}".format(extra_node_attr_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -253,7 +254,7 @@ def import_extra_link_attributes(input_dir, modeller, scenario_id, update=False) also load the network fields. Should be True if not the first model iteration Returns: None """ - print "importing link attributes" + print("importing link attributes from {}".format(extra_link_attr_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -307,7 +308,7 @@ def import_vehicles(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing transit vehicles" + print("importing transit vehicles from {}".format(emme_vehicle_transaction_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -329,7 +330,7 @@ def import_transit_time_functions(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing transit time functions" + print("importing transit time functions from {}".format(emme_transit_time_function_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -351,7 +352,7 @@ def import_transit_lines(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing transit network" + print("importing transit network from {}".format(emme_transit_network_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -374,7 +375,7 @@ def import_extra_transit_line_attributes(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing extra transit line attributes" + print("importing extra transit line attributes from {}".format(extra_transit_line_attr_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -402,7 +403,7 @@ def import_extra_transit_segment_attributes(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing extra transit segment attributes" + print("importing extra transit segment attributes from {}".format(extra_transit_segment_attr_file)) if modeller is None: modeller = _m.Modeller() scenario = 
modeller.emmebank.scenario(scenario_id) @@ -430,7 +431,7 @@ def import_station_attributes(input_dir, modeller, scenario_id): - scenario_id: int specifying scenario to import into Returns: None """ - print "importing station attributes" + print("importing station attributes from {}".format(station_extra_attributes_file)) if modeller is None: modeller = _m.Modeller() scenario = modeller.emmebank.scenario(scenario_id) @@ -485,7 +486,7 @@ def replace_route_for_lines_with_nntime_and_created_segments(network, input_dir) for idx, line in transit_line_df.iterrows(): if line['keep_line'] == 0: continue - print "Creating line %s (%s) with new links" % (line['line_name'], line['LINE']) + print("Creating line %s (%s) with new links" % (line['line_name'], line['LINE'])) transit_line = network.transit_line(line['line_name']) if transit_line is not None: network.delete_transit_line(line['line_name']) @@ -499,8 +500,8 @@ def replace_route_for_lines_with_nntime_and_created_segments(network, input_dir) break link = network.link(stop['node_id'], stop['next_node_id']) if link is None: - print "link from %s to %s doesn't exist, creating new link" % \ - (stop['node_id'], stop['next_node_id']) + print("link from %s to %s doesn't exist, creating new link" % \ + (stop['node_id'], stop['next_node_id'])) link = network.create_link(stop['node_id'], stop['next_node_id'], set([line_mode])) link.length = link.shape_length link['@trantime'] = link.length / line['XYSPEED'] * 60 # mi / mph * 60 min/mi = min @@ -544,8 +545,8 @@ def fill_transit_times_for_created_segments(network): segments_fixed += 1 if line.id not in lines_with_created_segments: lines_with_created_segments.append(line.id) - print "Number of transit lines modified: %s" % len(lines_with_created_segments) - print "Number of created segments assigned link trantime: %s" % segments_fixed + print("Number of transit lines modified: %s" % len(lines_with_created_segments)) + print("Number of created segments assigned link trantime: %s" % segments_fixed) def distribute_nntime_among_segments(segments_for_current_nntime, nntime): @@ -587,7 +588,7 @@ def distribute_nntime(network, input_dir): stop_attributes_path = _join(input_dir, 'all_stop_attributes.csv') stop_attributes_df = _pd.read_csv(stop_attributes_path) - print "Setting transit station-to-station times (NNTIME in Cube)" + print("Setting transit station-to-station times (NNTIME in Cube)") for line in network.transit_lines(): if (line['@uses_nntime'] == 0): @@ -654,7 +655,7 @@ def split_tap_connectors_to_prevent_walk(network): Returns: - None (network object is updated) """ - print "Splitting Tap Connectors" + print("Splitting Tap Connectors") tap_stops = _defaultdict(lambda: []) new_node_id = init_node_id(network) all_transit_modes = set([mode for mode in network.modes() if mode.type == "TRANSIT"]) @@ -754,12 +755,12 @@ def split_tap_connectors_to_prevent_walk(network): line_data.pop("vehicle"), itinerary) # copy line attributes back - for k, v in line_data.iteritems(): + for (k, v) in line_data.items(): new_line[k] = v # copy segment attributes back for seg in new_line.segments(include_hidden=True): data = seg_data.get((seg.i_node, seg.j_node, seg.loop_index), {}) - for k, v in data.iteritems(): + for (k, v) in data.items(): seg[k] = v # set boarding, alighting and dwell time on new tap access / egress segments for tap_ref in tap_segments: @@ -987,7 +988,7 @@ def apply_station_attributes(input_dir, network): tap_modes_serviced = list(set(tap_modes_serviced)) # unique entries # only need to split transfer node 
             if (len(tap_modes_serviced) > 1) & (tap['@stplatformtime'] > 0):
-                print 'Splitting transfer nodes for tap', tap_id, 'with modes', tap_modes_serviced
+                print('Splitting transfer nodes for tap', tap_id, 'with modes', tap_modes_serviced)
                 for mode_count, mode in enumerate(tap_modes_serviced):
                     # duplicate transfer node with an offset
                     new_node_id += 1
@@ -1008,37 +1009,37 @@ def get_new_node_coordinates(point_num, old_x, old_y, radius, total_points):
                         mode_count, transfer_node.x, transfer_node.y, 7, len(tap_modes_serviced))
                     new_tnode.x = new_coords[0]
                     new_tnode.y = new_coords[1]
-                    print 'Created node ', new_node_id, 'at x = ', new_coords[0], 'and y = ', new_coords[1]
+                    print('Created node ', new_node_id, 'at x = ', new_coords[0], 'and y = ', new_coords[1])
 
                     # re-create all relevant outgoing transfer links
                     for transfer_link in transfer_node.outgoing_links():
                         # see if this transfer link connects to a node that services transit lines
                         modes_at_stop = [str(segment.line['#src_mode']) for segment in transfer_link.j_node.outgoing_segments()]
                         modes_at_stop = list(set(['b' if stop_mode == 'x' else stop_mode for stop_mode in modes_at_stop]))
-                        print 'mode: ', mode, ' modes_at_stop: ', modes_at_stop
-                        if (transfer_link.j_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype('string').values):
-                            print 'connects to transfer node'
-                        if (mode in modes_at_stop) | (transfer_link.j_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype('string').values):
+                        print('mode: ', mode, ' modes_at_stop: ', modes_at_stop)
+                        if (transfer_link.j_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype(str).values):
+                            print('connects to transfer node')
+                        if (mode in modes_at_stop) | (transfer_link.j_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype(str).values):
                             # create new link if this transfer node services the mode or connects to another transfer node
                             new_transfer_link = network.create_link(new_tnode.id, transfer_link.j_node, transfer_link.modes)
                             for attr in link_attributes:
                                 new_transfer_link[attr] = transfer_link[attr]
-                            print 'created link'
+                            print('created link')
 
                     # re-create all relevant incoming transfer links
                     for transfer_link in transfer_node.incoming_links():
                         # see if this transfer link connects to a node that services transit lines
                         modes_at_stop = [str(segment.line['#src_mode']) for segment in transfer_link.i_node.outgoing_segments()]
                         modes_at_stop = list(set(['b' if stop_mode == 'x' else stop_mode for stop_mode in modes_at_stop]))
-                        print 'mode: ', mode, ' modes_at_stop: ', modes_at_stop
-                        if (transfer_link.i_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype('string').values):
-                            print 'connects to transfer node'
-                        if (mode in modes_at_stop) | (transfer_link.i_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype('string').values):
+                        print('mode: ', mode, ' modes_at_stop: ', modes_at_stop)
+                        if (transfer_link.i_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype(str).values):
+                            print('connects to transfer node')
+                        if (mode in modes_at_stop) | (transfer_link.i_node.id in tap_to_pseudo_tap_xwalk['emme_pseudo_tap'].astype(str).values):
                             # create new link if this transfer node services the mode or connects to another transfer node
                             new_transfer_link = network.create_link(transfer_link.i_node, new_tnode.id, transfer_link.modes)
                             for attr in link_attributes:
                                 new_transfer_link[attr] = transfer_link[attr]
-                            print 'created link'
+                            print('created link')
 
                     # all old transfer links are replaced and need to be deleted
                     for transfer_link in transfer_node.outgoing_links():
@@ -1048,8 +1049,8 @@ def get_new_node_coordinates(point_num, old_x, old_y, radius, total_points):
                 network.delete_node(transfer_node_id)
 
             # need to connect newly created transfer nodes to each other
-            for mode, tnode_id in new_tnode_mode_dict.iteritems():
-                for next_mode, next_tnode_id in new_tnode_mode_dict.iteritems():
+            for (mode, tnode_id) in new_tnode_mode_dict.items():
+                for (next_mode, next_tnode_id) in new_tnode_mode_dict.items():
                     if tnode_id == next_tnode_id:
                         # don't need to connect transfer node to itself
                         continue
@@ -1071,7 +1072,7 @@ def get_new_node_coordinates(point_num, old_x, old_y, radius, total_points):
                         new_link['@trantime'] = tap['@stplatformtime'] / 60
                         new_link['@walktime'] = tap['@stplatformtime'] / 60
                     else:
-                        print 'mode', mode, 'and new_mode', next_mode, "combination doesn't make sense for tap", tap_id
+                        print('mode', mode, 'and new_mode', next_mode, "combination doesn't make sense for tap", tap_id)
 
     # walk transfer links are separated onto separate pseudo taps created in BuildTransitNetworks.job
@@ -1101,7 +1102,7 @@ def get_new_node_coordinates(point_num, old_x, old_y, radius, total_points):
     # print "Number of nodes set with new platform time", len(stop_nodes_with_platform_time)
     # print "Number of walk links set with new walk time", len(bus_transfer_walk_links_overridden)
-    print "Number of walk links set with station platform time", len(tap_connectors_with_platform_time)
+    print("Number of walk links set with station platform time", len(tap_connectors_with_platform_time))
     # print "Stop nodes with platform time", tap_connectors_with_platform_time
 
     # test_tap = network.node(4108)
@@ -1148,7 +1149,7 @@ def create_time_period_scenario(modeller, scenario_id, root, period):
     # fix_bad_walktimes(network)
     scenario.publish_network(network)
 
-    print "applying fares"
+    print("applying fares")
     apply_fares = _apply_fares.ApplyFares()
     apply_fares.scenario = scenario
     apply_fares.dot_far_file = _join(root, "fares.far")
@@ -1184,7 +1185,7 @@ def update_congested_link_times(modeller, scenario_id, root, period):
     input_dir = _join(root, "emme_network_transaction_files_{}".format(period))
     emmebank = modeller.emmebank
-    print "updating scenario_id %s" % (scenario_id)
+    print("updating scenario_id %s" % (scenario_id))
 
     import_extra_link_attributes(input_dir, modeller, scenario_id, update=True)
     import_extra_transit_segment_attributes(input_dir, modeller, scenario_id)
@@ -1264,10 +1265,10 @@ def start_desktop(root, title="mtc_emme", port=59673):
     for period in time_periods:
         scenario_id = period_to_scenario_dict[period]
         if args.first_iteration == 'yes':
-            print "creating %s scenario" % period
+            print("creating %s scenario" % period)
             with _m.logbook_trace("Creating network for %s period " % (period)):
                 create_time_period_scenario(modeller, scenario_id, args.trn_path, period)
         else:
-            print "updating %s scenario" % period
+            print("updating %s scenario" % period)
             with _m.logbook_trace("Updating network for %s period " % (period)):
                 update_congested_link_times(modeller, scenario_id, args.trn_path, period)
diff --git a/model-files/scripts/skims/cube_to_emme_network_conversion.py b/model-files/scripts/skims/cube_to_emme_network_conversion.py
index 7709ef41..95d3474f 100644
--- a/model-files/scripts/skims/cube_to_emme_network_conversion.py
+++ b/model-files/scripts/skims/cube_to_emme_network_conversion.py
@@ -376,13 +376,13 @@ def create_and_write_mode_transaction_file(self, write_file=True):
     assert all(pd.Series(emme_transit_modes_dict).isin(mode_transaction_df['mode'].values)), \
-        "Mode in the emme_transit_modes_dict is not listed in the mode transaction file"
+            "Mode in the emme_transit_modes_dict is not listed in the mode transaction file"
 
     if write_file == False:
         return mode_transaction_df
 
-    with open(self.emme_mode_transaction_file, 'w') as file:
-        file.write('t modes init\n')
+    with open(self.emme_mode_transaction_file, 'w', newline='') as file:
+        file.write('t modes init' + os.linesep)
         mode_transaction_df.to_csv(file, mode='a', sep=' ', index=False, header=False)
         file.close()
     return mode_transaction_df
@@ -434,8 +434,8 @@ def create_and_write_vehicle_transaction_file(self, vehicletype_df, transit_line
     veh_output_cols = ['transaction', 'vehicle', 'descr', 'mode', 'fleet', 'caps', 'capt', 'ctc', 'cdc', 'etc', 'edc', 'auto']
 
-    with open(self.emme_vehicle_transaction_file, 'w') as file:
-        file.write('t vehicles init\n')
+    with open(self.emme_vehicle_transaction_file, 'w', newline='') as file:
+        file.write('t vehicles init' + os.linesep)
         veh_transaction_df[veh_output_cols].to_csv(file, mode='a', sep=' ', index=False, header=False)
         file.close()
@@ -663,10 +663,10 @@ def write_links_and_nodes_transaction_file(self, node_gdf, link_gdf, mode_transa
     node_gdf, node_transaction_cols = self.create_emme_nodes_input(node_gdf)
     link_gdf, link_transaction_cols = self.create_emme_links_input(link_gdf, mode_transaction_df)
 
-    with open(self.emme_network_transaction_file, 'w') as file:
-        file.write('t nodes init\n')
+    with open(self.emme_network_transaction_file, 'w', newline='') as file:
+        file.write('t nodes init' + os.linesep)
         node_gdf[node_transaction_cols].to_csv(file, mode='a', sep=' ', index=False, header=False)
-        file.write('t links init\n')
+        file.write('t links init' + os.linesep)
         link_gdf[link_transaction_cols].to_csv(file, mode='a', sep=' ', index=False, header=False)
     file.close()
     return node_gdf, link_gdf
diff --git a/model-files/scripts/skims/skim_transit_network.py b/model-files/scripts/skims/skim_transit_network.py
index be21ca9d..9c3235bb 100644
--- a/model-files/scripts/skims/skim_transit_network.py
+++ b/model-files/scripts/skims/skim_transit_network.py
@@ -53,7 +53,7 @@ import openmatrix as _omx
     def open_file(file_path, mode):
         return OmxMatrix(_omx.open_file(file_path, mode))
-except Exception, e:
+except Exception as e:
     import omx as _omx
     def open_file(file_path, mode):
         return OmxMatrix(_omx.openFile(file_path, mode))
@@ -203,7 +203,7 @@ def connect_to_desktop(port=59673):
     Returns:
         - Emme desktop object from the specified port
     """
-    print("port:", port)
+    print("connecting to Emme desktop via port:", port)
     desktop = _app.connect(port=port)
     return desktop
@@ -220,6 +220,7 @@ def start_desktop(root, title="emme_full_run", port=59673):
         - Emme desktop object
     """
     emme_project = _os.path.join(root, title, title + ".emp")
+    print("emme_project: {}".format(emme_project))
     desktop = _app.start(  # will not close desktop when program ends
         project=emme_project, user_initials="RSG", visible=True, port=port)
     return desktop
@@ -508,7 +509,7 @@ def parse_num_processors(value):
     max_processors = _multiprocessing.cpu_count()
     if isinstance(value, int):
         return value
-    if isinstance(value, basestring):
+    if isinstance(value, str):
         if value == "MAX":
             return max_processors
         if _re.match("^[0-9]+$", value):
@@ -752,7 +753,7 @@ def get_fare_modes(src_modes):
     specs = []
     names = []
     demand_matrix_template = "mfTRN_{set}_{period}"
-    for mode_name, parameters in skim_parameters.iteritems():
+    for (mode_name, parameters) in skim_parameters.items():
         spec = _copy(base_spec)
         spec["modes"] = parameters["modes"]
         # name = "%s_%s%s" % (period, a_name, mode_name)
@@ -792,7 +793,7 @@ def get_fare_modes(src_modes):
     assign_transit = modeller.tool(
         "inro.emme.transit_assignment.extended_transit_assignment")
     add_volumes = False
-    for mode_name, parameters in skim_parameters.iteritems():
+    for (mode_name, parameters) in skim_parameters.items():
         spec = _copy(base_spec)
         name = "%s_%s" % (period, mode_name)
         spec["modes"] = parameters["modes"]
@@ -800,6 +801,7 @@ def get_fare_modes(src_modes):
         spec["demand"] = "mfTRN_{set}_{period}".format(set=_set_dict[mode_name], period=period)
         # spec['od_results'] = {'total_impedance': 'mf{}_{}_IMPED'.format(period, mode_name)}
         spec["journey_levels"] = parameters["journey_levels"]
+        print("Running assign_transit with spec={} class_name={} add_volumes={} scenario={}".format(spec, name, add_volumes, scenario))
         assign_transit(spec, class_name=name, add_volumes=add_volumes, scenario=scenario)
         add_volumes = True
@@ -1084,7 +1086,7 @@ def save_per_iteration_flows(scenario):
         "inro.emme.transit_assignment.extended.network_results")
 
     for strat in scenario.transit_strategies.strat_files():
-        print strat.name
+        print(strat.name)
         _, num, class_name = strat.name.split()
         attr_name = ("@%s_it%s" % (class_name, num)).lower()
         create_attr("TRANSIT_SEGMENT", attr_name, scenario=scenario, overwrite=True)
@@ -1233,7 +1235,7 @@ def create_mapping(self, name, ids):
         exception_raised = False
         try:
             self.matrix.create_mapping(name, ids)  # Emme 44 and above
-        except Exception, e:
+        except Exception as e:
             exception_raised = True
 
         if exception_raised:
@@ -1248,7 +1250,7 @@ def create_matrix(self, key, obj, chunkshape, attrs):
                 chunkshape=chunkshape,
                 attrs=attrs
             )
-        except Exception, e:
+        except Exception as e:
            exception_raised = True
         if exception_raised:  # Emme 437
            self.matrix.createMatrix(
@@ -1308,7 +1310,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
 
     def write_matrices(self, matrices):
         if isinstance(matrices, dict):
-            for key, matrix in matrices.iteritems():
+            for (key, matrix) in matrices.items():
                 self.write_matrix(matrix, key)
         else:
             for matrix in matrices:
diff --git a/model-files/scripts/skims/skim_zone_transfer.py b/model-files/scripts/skims/skim_zone_transfer.py
index 23ea683f..5e9a998c 100644
--- a/model-files/scripts/skims/skim_zone_transfer.py
+++ b/model-files/scripts/skims/skim_zone_transfer.py
@@ -69,11 +69,19 @@
 # shutil.copy2(skim_file, skim_file + '.bak')
 temp_skim_file = skim_file + '.tmp'
-f = open(temp_skim_file,'wb')
+f = open(temp_skim_file,'w')
 for line in open(skim_file):
     data = line.strip().split(',')
     for i in range(columns_to_transfer):
-        data[i] = transfer[i][int(data[i])]
+        try:
+            data[i] = transfer[i][int(data[i])]
+        except Exception as inst:
+            # for some reason this file has lots of blank lines -- work around this
+            if data == ['']: continue
+            # handle EOF
+            if data == ['\x1a']: continue
+            print(data)
+            raise inst
     f.write(','.join(data) + os.linesep)
 f.close()
diff --git a/model-files/scripts/skims/tap_lines.py b/model-files/scripts/skims/tap_lines.py
index e79b9a77..bd9b51e8 100644
--- a/model-files/scripts/skims/tap_lines.py
+++ b/model-files/scripts/skims/tap_lines.py
@@ -24,7 +24,7 @@
 start_time = pytime.time()
 
-print 'reading transit lines from {}'.format(transit_line_file)
+print('reading transit lines from {}'.format(transit_line_file))
 linesByNode = dict()
 trn_line = ""
 for temp_line in open(transit_line_file):
@@ -80,16 +80,16 @@
 #            linesByNode[n] = set()
 #        linesByNode[n].add(lineName.replace('"',""))
 
-print 'reading tap connectors'
+print('reading tap connectors')
 access_links = []
-with open(network_tap_links_file, 'rb') as csvfile:
+with open(network_tap_links_file, 'r') as csvfile:
     tapreader = csv.reader(csvfile, skipinitialspace=True)
     for row in tapreader:
         access_links.append(row)
 
-print 'reading zone sequence file'
+print('reading zone sequence file')
 tapToSeqTap = dict()
-with open(zone_seq_file, 'rb') as csvfile:
+with open(zone_seq_file, 'r') as csvfile:
     tapreader = csv.DictReader(csvfile)
     for row in tapreader:
         node_id = int(row["N"])
@@ -121,7 +121,7 @@
         linesByTap[tap].add(line)
 
 #write out tapLines file for CT-RAMP
-f = file(tap_lines_file,"wt")
+f = open(tap_lines_file,"w")
 f.write("TAP,LINES\n")
 for tap in linesByTap.keys():
     lines = " ".join(list(linesByTap[tap]))
@@ -131,4 +131,4 @@
 print("wrote {}".format(tap_lines_file))
 
 end_time = pytime.time()
-print 'elapsed time in minutes: ' + str((end_time - start_time) / 60.0)
+print('elapsed time in minutes: {}'.format((end_time - start_time) / 60.0))
diff --git a/utilities/cube_to_emme_network_conversion.py b/utilities/cube_to_emme_network_conversion.py
index 6d774942..97a31ed7 100644
--- a/utilities/cube_to_emme_network_conversion.py
+++ b/utilities/cube_to_emme_network_conversion.py
@@ -45,32 +45,39 @@
 # --------------------------------------------- Methods --------------------------------------------
 class emme_network_conversion:
     def __init__(self, cube_network_folder, period):
-        self.period = period
-        self.emme_network_transaction_folder = os.path.join(
-            cube_network_folder,"emme_network_transaction_files_{}".format(period))
-        self.link_shapefile = os.path.join(cube_network_folder, "mtc_transit_network_{}_CONG_links.DBF".format(period))
-        self.node_shapefile = os.path.join(cube_network_folder, "mtc_transit_network_{}_CONG_nodes.DBF".format(period))
-        # transit_lin_file = r"E:\projects\clients\marin\2015_test_2019_02_13\trn\transitLines_new_nodes.lin"
-        self.transit_lin_file = os.path.join(cube_network_folder, "transitLines_new_nodes.lin")
-        self.transit_system_file = os.path.join(cube_network_folder, "transitSystem.PTS")
-        self.transit_SET3_file = os.path.join(cube_network_folder, "transitFactors_SET3.fac")
-        self.node_id_crosswalk_file = os.path.join(self.emme_network_transaction_folder, "node_id_crosswalk.csv")
-        self.emme_mode_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_modes.txt")
-        self.emme_vehicle_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_vehicles.txt")
-        self.emme_network_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_network.txt")
-        self.extra_node_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_node_attributes.txt")
-        self.extra_link_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_link_attributes.txt")
-        self.update_extra_link_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_update_extra_link_attributes.txt")
-        self.emme_transit_network_file = os.path.join(self.emme_network_transaction_folder, "emme_transit_lines.txt")
-        self.extra_transit_line_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_line_attributes.txt")
-        self.extra_transit_segment_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_segment_attributes.txt")
-        self.emme_transit_time_function_file = os.path.join(self.emme_network_transaction_folder, "emme_transit_time_function.txt")
-        self.all_stop_attributes_file = os.path.join(self.emme_network_transaction_folder, "all_stop_attributes.csv")
-        self.all_transit_lines_file = os.path.join(self.emme_network_transaction_folder, "all_transit_lines.csv")
+        self.period = period
+        self.emme_network_transaction_folder = os.path.join(cube_network_folder, "emme_network_transaction_files_{}".format(period))
+        self.link_shapefile = os.path.join(cube_network_folder, "mtc_transit_network_{}_CONG_links.DBF".format(period))
+        self.node_shapefile = os.path.join(cube_network_folder, "mtc_transit_network_{}_CONG_nodes.DBF".format(period))
+        self.transit_lin_file = os.path.join(cube_network_folder, "transitLines_new_nodes.lin")
+        self.transit_system_file = os.path.join(cube_network_folder, "transitSystem.PTS")
+        self.transit_SET3_file = os.path.join(cube_network_folder, "transitFactors_SET3.fac")
+
+        self.node_id_crosswalk_file = os.path.join(self.emme_network_transaction_folder, "node_id_crosswalk.csv")
+        self.emme_mode_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_modes.txt")
+        self.emme_vehicle_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_vehicles.txt")
+        self.emme_network_transaction_file = os.path.join(self.emme_network_transaction_folder, "emme_network.txt")
+        self.extra_node_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_node_attributes.txt")
+        self.extra_link_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_link_attributes.txt")
+        self.update_extra_link_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_update_extra_link_attributes.txt")
+        self.emme_transit_network_file = os.path.join(self.emme_network_transaction_folder, "emme_transit_lines.txt")
+        self.extra_transit_line_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_line_attributes.txt")
+        self.extra_transit_segment_attr_file = os.path.join(self.emme_network_transaction_folder, "emme_extra_segment_attributes.txt")
+        self.emme_transit_time_function_file = os.path.join(self.emme_network_transaction_folder, "emme_transit_time_function.txt")
+        self.all_stop_attributes_file = os.path.join(self.emme_network_transaction_folder, "all_stop_attributes.csv")
+        self.all_transit_lines_file = os.path.join(self.emme_network_transaction_folder, "all_transit_lines.csv")
 
     def load_input_data(self):
+        """
+        Reads node and link shapefiles as geodataframes and verifies that extra_node_attributes, extra_link_attributes
+        are present as columns.
+
+        Returns both geodataframes.
+        """
         print("Loading input data for", self.period, "period")
+        print("- Reading {}".format(self.node_shapefile))
         node_gdf = geopandas.read_file(self.node_shapefile)
+        print("- Reading {}".format(self.link_shapefile))
         link_gdf = geopandas.read_file(self.link_shapefile)
 
         for attr in extra_node_attributes:
diff --git a/utilities/find-compatible-python.py b/utilities/find-compatible-python.py
new file mode 100644
index 00000000..2261314d
--- /dev/null
+++ b/utilities/find-compatible-python.py
@@ -0,0 +1,88 @@
+# Helper script to reveal requirements for python packages/versions.
+# This is useful because installing Emme python packages brings in:
+#   gdal-2.3.3
+#   fiona 1.8.5
+#   shapely-1.6.4.post1
+#   numpy-1.17.0
+#   pandas-0.24.2
+#   pyproj-1.9.6
+#
+# Trying to find packages that are compatible --
+# -- looks like geopandas-0.6.3 works; geopandas-0.7.0 requires pyproj >=2.2.0
+# -- looks like scipy-1.7.3 works (requires numpy (<1.23.0,>=1.16.5))
+#
+# Found geopandas-0.6.2 on Gohlke's site (https://www.lfd.uci.edu/~gohlke/pythonlibs/)
+# and saved to M:\Software\Python\geopandas-0.6.2-py2.py3-none-any.whl
+import argparse, requests, sys
+
+PACKAGE_VERSIONS = {
+    "geopandas":[
+        "0.10.2",
+        "0.10.1",
+        "0.10.0",
+        "0.9.0",
+        "0.8.2",
+        "0.8.1",
+        "0.8.0",
+        "0.7.0",
+        "0.6.3",
+        "0.6.2",
+        "0.6.1",
+        "0.6.0",
+        "0.5.1",
+        "0.5.0",
+        "0.4.1",
+        "0.4.0"
+    ],
+    "scipy":[
+        "1.8.0",
+        "1.7.3",
+        "1.7.2",
+        "1.7.1",
+        "1.7.0",
+        "1.6.3",
+        "1.6.2",
+        "1.6.1",
+        "1.6.0",
+        "1.5.4",
+        "1.5.3",
+        "1.5.2",
+        "1.5.1",
+        "1.5.0",
+        "1.4.1",
+        "1.4.0",
+        "1.3.3",
+        "1.3.2",
+        "1.3.1",
+        "1.3.0",
+        "1.2.3",
+        "1.2.2",
+        "1.2.1",
+        "1.2.0",
+        "1.1.0"]
+}
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description="Grabs information about some python package requirements", formatter_class=argparse.RawDescriptionHelpFormatter,)
+    parser.add_argument("package", choices=PACKAGE_VERSIONS.keys())
+    args = parser.parse_args()
+
+    for version in PACKAGE_VERSIONS[args.package]:
+        print("checking {} version {}".format(args.package, version))
+        url = "https://pypi.python.org/pypi/{}/{}/json".format(args.package, version)
+        r = requests.get(url)
+        print("received: {}".format(r.status_code))
+        data = r.json()
+        if 'info' not in data.keys():
+            print("No info for this version")
+            continue
+        if 'requires_dist' not in data['info'].keys():
+            print("No requires_dist in info for this version")
+            continue
+        requires_dist = r.json()['info']['requires_dist']
+        if type(requires_dist) != list:
+            print("requires_dist is not a list: {}".format(requires_dist))
+            continue
+        for req in requires_dist:
+            print(" {}".format(req))
+
+    sys.exit()
\ No newline at end of file
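
Usage sketch: find-compatible-python.py loops over its pinned version lists, but the underlying PyPI query works for any package/version pair. A minimal standalone version of that query, assuming only that the requests package is installed; the geopandas/0.6.3 pair here is just an example:

    import requests

    # Fetch PyPI metadata for one package/version and print its declared
    # dependencies (info.requires_dist); PyPI returns null for some older uploads.
    url = "https://pypi.python.org/pypi/{}/{}/json".format("geopandas", "0.6.3")
    data = requests.get(url).json()
    for req in data.get("info", {}).get("requires_dist") or []:
        print(req)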