api.py

  1. """Main application"""
  2. import re
  3. import requests
  4. from bs4 import BeautifulSoup
  5. from app import BASE_URL, HEADERS, LOGGER, RESOURCE_IDS
  6. def download_resources(state_id, resource_id):
  7. """Download the resource list"""
  8. # return read_resources()
  9. response = requests.get(
  10. '{}listed/stateresources/{}/{}'.format(BASE_URL, state_id, RESOURCE_IDS[resource_id]),
  11. headers=HEADERS
  12. )
  13. return parse_resources(response.text)
  14. def read_resources():
  15. """Read resource file"""
  16. with open('resources.html') as file:
  17. return parse_resources(file)
  18. def parse_resources(html):
  19. """Read the resources left"""
  20. soup = BeautifulSoup(html, 'html.parser')
  21. regions_tree = soup.find_all(class_='list_link')
  22. regions = {}
  23. for region_tree in regions_tree:
  24. region_id = int(region_tree['user'])
  25. columns = region_tree.find_all('td')
  26. regions[region_id] = {
  27. 'region_name': re.sub('Factories: .*$', '', columns[1].text),
  28. 'explored': float(columns[2].string),
  29. 'maximum': int(float(columns[3].string)),
  30. 'deep_exploration': int(columns[4].string),
  31. 'limit_left': int(columns[5].string),
  32. }
  33. return regions
  34. def refill(state_id, capital_id, resource_id, alt):
  35. """Main function"""
  36. resource_name = RESOURCE_IDS[resource_id]
  37. LOGGER.info('state %6s: start refill for %s, alt: %s', state_id, resource_name, alt)
  38. # Check location
  39. # response = requests.get(
  40. # '{}main/content'.format(BASE_URL),
  41. # headers=HEADERS
  42. # )
  43. # soup = BeautifulSoup(response.text, 'html.parser')
  44. # state_div = soup.find_all('div', {'class': 'index_case_50'})[1]
  45. # action = state_div.findChild()['action']
  46. # current_state_id = int(re.sub('.*/', '', action))
  47. # LOGGER.info('Current state %s', current_state_id)
  48. params = {}
  49. # if current_state_id != state_id:
  50. # params['alt'] = True
  51. if alt:
  52. params['alt'] = True
  53. json_data = {
  54. 'tmp_gov': resource_id
  55. }
  56. requests.post(
  57. '{}parliament/donew/42/{}/0'.format(BASE_URL, resource_id),
  58. headers=HEADERS,
  59. params=params,
  60. json=json_data
  61. )
  62. LOGGER.info('state %6s: created exploration law for %s', state_id, resource_name)
  63. response = requests.get(
  64. '{}parliament/index/{}'.format(BASE_URL, capital_id),
  65. headers=HEADERS
  66. )
  67. soup = BeautifulSoup(response.text, 'html.parser')
  68. active_laws = soup.find('div', {'id': 'parliament_active_laws'})
  69. exploration_laws = active_laws.findAll(
  70. text='Resources exploration: state, {} resources'.format(resource_name)
  71. )
  72. LOGGER.info('state %6s: number of exploration laws: %s', state_id, len(exploration_laws))
  73. for exploration_law in exploration_laws:
  74. action = exploration_law.parent.parent['action']
  75. LOGGER.info('state %6s: vote for law: %s', state_id, action)
  76. action = action.replace('law', 'votelaw')
  77. result = requests.post(
  78. '{}{}/pro'.format(BASE_URL, action),
  79. params=params,
  80. headers=HEADERS
  81. )
  82. LOGGER.info('Response: %s', result.text)
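

# A minimal usage sketch (an assumption, not part of the original module):
# the numeric ids below are placeholders, and the sketch relies only on the
# functions defined above and the constants imported from app.
if __name__ == '__main__':
    STATE_ID = 1       # placeholder state id
    CAPITAL_ID = 1     # placeholder capital region id
    RESOURCE_ID = 0    # placeholder key into RESOURCE_IDS

    regions = download_resources(STATE_ID, RESOURCE_ID)
    for rid, region in regions.items():
        LOGGER.info('region %s (%s): %s left', rid, region['region_name'], region['limit_left'])

    refill(STATE_ID, CAPITAL_ID, RESOURCE_ID, alt=False)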