# api.py
  1. """Main application"""
  2. import re
  3. from datetime import datetime
  4. import requests
  5. from bs4 import BeautifulSoup
  6. from app import BASE_URL, HEADERS, LOGGER, RESOURCE_IDS, RESOURCE_NAMES
  7. def download_deep_explorations(region_id):
  8. """Download the deep explorations list"""
  9. # return read_deep_explorations()
  10. response = requests.get(
  11. '{}listed/upgrades/{}'.format(BASE_URL, region_id),
  12. headers=HEADERS
  13. )
  14. return parse_deep_explorations(response.text)
  15. def read_deep_explorations():
  16. """Read deep_exploration file"""
  17. with open('deep_explorations.html') as file:
  18. return parse_deep_explorations(file)
  19. def parse_deep_explorations(html):
  20. """Read the deep_explorations left"""
  21. soup = BeautifulSoup(html, 'html.parser')
  22. deep_explorations_tree = soup.find_all(class_='list_link')
  23. deep_explorations = {}
  24. for deep_exploration_tree in deep_explorations_tree:
  25. deep_exploration_id = int(deep_exploration_tree['user'])
  26. columns = deep_exploration_tree.find_all('td')
  27. deep_explorations[deep_exploration_id] = {
  28. 'resource_type': RESOURCE_NAMES[columns[1].text.replace(' resources', '').lower()],
  29. 'until_date_time': datetime.fromtimestamp(int(columns[2]['rat'])),
  30. }
  31. return deep_explorations
  32. def deep_explorate(state_id, region_id, resource_type, amount, alt):
  33. """Main function"""
  34. params = {}
  35. if alt:
  36. params['alt'] = True
  37. response = requests.get(
  38. '{}main/content'.format(BASE_URL),
  39. headers=HEADERS,
  40. params=params
  41. )
  42. if response.status_code != 200:
  43. LOGGER.warning('Error %s in response', response.status_code)
  44. soup = BeautifulSoup(response.text, 'html.parser')
  45. state_div = soup.find_all('div', {'class': 'index_case_50'})[1]
  46. action = state_div.findChild()['action']
  47. current_state_id = int(re.sub('.*/', '', action))
  48. LOGGER.info(
  49. '%s: region belongs to state %s, current state %s',
  50. region_id, state_id, current_state_id
  51. )
  52. if current_state_id == state_id:
  53. json_data = {
  54. 'tmp_gov': '{}_{}'.format(resource_type, amount)
  55. }
  56. requests.post(
  57. '{}parliament/donew/34/{}_{}/{}'.format(
  58. BASE_URL, resource_type, amount, region_id
  59. ),
  60. headers=HEADERS,
  61. params=params,
  62. json=json_data
  63. )
  64. LOGGER.info(
  65. '%s: created deep exploration law for %s',
  66. region_id, RESOURCE_IDS[resource_type]
  67. )
  68. response = requests.get(
  69. '{}parliament/index/{}'.format(BASE_URL, region_id),
  70. headers=HEADERS
  71. )
  72. soup = BeautifulSoup(response.text, 'html.parser')
  73. active_laws = soup.find('div', {'id': 'parliament_active_laws'})
  74. for exploration_law in active_laws.findAll(text=re.compile('Deep exploration,')):
  75. action = exploration_law.parent.parent['action']
  76. action = action.replace('law', 'votelaw')
  77. result = requests.post(
  78. '{}{}/pro'.format(BASE_URL, action),
  79. params=params,
  80. headers=HEADERS
  81. )
  82. LOGGER.info('Response: %s', result.text)
  83. LOGGER.info(
  84. '%s: accepted deep exploration law for %s',
  85. region_id, RESOURCE_IDS[resource_type]
  86. )